about summary refs log tree commit diff
path: root/src/simd
diff options
context:
space:
mode:
author	Junyang Shao <shaojunyang@google.com>	2025-07-14 20:29:46 +0000
committer	Junyang Shao <shaojunyang@google.com>	2025-07-15 14:54:17 -0700
commit	6d1068014168da26b2f5bcaab15a137aee4d7d05 (patch)
tree	d0ac9c8028532f4b6666431070a0346613fb4d3f /src/simd
parent	17baae72db6f31275383ecb091ee3ec722e290ad (diff)
download	go-6d1068014168da26b2f5bcaab15a137aee4d7d05.tar.xz
[dev.simd] cmd/compile, simd: add Compress
This CL is generated by CL 687975.

Change-Id: I21707d108773cc6d8e6f07aaed60e756faa1e6cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/687995
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
Diffstat (limited to 'src/simd')
-rw-r--r--	src/simd/ops_amd64.go	182
-rw-r--r--	src/simd/simd_test.go	10
-rw-r--r--	src/simd/simd_wrapped_test.go	630
3 files changed, 822 insertions, 0 deletions
diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go
index ebb626358f..7121a6d208 100644
--- a/src/simd/ops_amd64.go
+++ b/src/simd/ops_amd64.go
@@ -1084,6 +1084,188 @@ func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8
+/* Compress */
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x4) Compress(mask Mask32x4) Float32x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x8) Compress(mask Mask32x8) Float32x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x16) Compress(mask Mask32x16) Float32x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x2) Compress(mask Mask64x2) Float64x2
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x4) Compress(mask Mask64x4) Float64x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x8) Compress(mask Mask64x8) Float64x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x16) Compress(mask Mask8x16) Int8x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x32) Compress(mask Mask8x32) Int8x32
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x64) Compress(mask Mask8x64) Int8x64
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x8) Compress(mask Mask16x8) Int16x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x16) Compress(mask Mask16x16) Int16x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x32) Compress(mask Mask16x32) Int16x32
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x4) Compress(mask Mask32x4) Int32x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x8) Compress(mask Mask32x8) Int32x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x16) Compress(mask Mask32x16) Int32x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x2) Compress(mask Mask64x2) Int64x2
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x4) Compress(mask Mask64x4) Int64x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x8) Compress(mask Mask64x8) Int64x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x16) Compress(mask Mask8x16) Uint8x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x32) Compress(mask Mask8x32) Uint8x32
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x64) Compress(mask Mask8x64) Uint8x64
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x8) Compress(mask Mask16x8) Uint16x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x16) Compress(mask Mask16x16) Uint16x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x32) Compress(mask Mask16x32) Uint16x32
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x4) Compress(mask Mask32x4) Uint32x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x8) Compress(mask Mask32x8) Uint32x8
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x16) Compress(mask Mask32x16) Uint32x16
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x2) Compress(mask Mask64x2) Uint64x2
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x4) Compress(mask Mask64x4) Uint64x4
+
+// Compress performs a compression on vector x using mask by
+// selecting elements as indicated by mask, and pack them to lower indexed elements.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x8) Compress(mask Mask64x8) Uint64x8
+
/* DiffWithCeilWithPrecision */
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go
index f1a2f11738..d7010de10a 100644
--- a/src/simd/simd_test.go
+++ b/src/simd/simd_test.go
@@ -186,6 +186,16 @@ func TestPermute2(t *testing.T) {
}
}
+func TestCompress(t *testing.T) {
+ if !simd.HasAVX512() {
+ t.Skip("Test requires HasAVX512, not available on this hardware")
+ return
+ }
+ testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4},
+ []int32{0, -1, 0, -1},
+ []int32{2, 4, 0, 0}, "Compress")
+}
+
// checkInt8Slices ensures that b and a are equal, to the end of b.
// also serves to use the slices, to prevent accidental optimization.
func checkInt8Slices(t *testing.T, a, b []int8) {
diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go
index 29452bdad0..8f0fb665be 100644
--- a/src/simd/simd_wrapped_test.go
+++ b/src/simd/simd_wrapped_test.go
@@ -117,6 +117,27 @@ func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32
}
}
+func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x4
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Float32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
@@ -369,6 +390,27 @@ func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32
}
}
+func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x8
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Float32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
@@ -613,6 +655,27 @@ func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int3
}
}
+func testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x16
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Float32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
@@ -857,6 +920,27 @@ func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64
}
}
+func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x2
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Float64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
@@ -1107,6 +1191,27 @@ func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64
}
}
+func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x4
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Float64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
@@ -1351,6 +1456,27 @@ func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64
}
}
+func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x8
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Float64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8
@@ -1591,6 +1717,27 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s
}
}
+func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x16
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x16Slice(v0)
+ vec1 := simd.LoadInt8x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x16())
+
+ default:
+ t.Errorf("Unknown method: Int8x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x16
@@ -1772,6 +1919,27 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s
}
}
+func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x32
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x32Slice(v0)
+ vec1 := simd.LoadInt8x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x32())
+
+ default:
+ t.Errorf("Unknown method: Int8x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x32
@@ -1943,6 +2111,27 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s
}
}
+func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x64
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x64Slice(v0)
+ vec1 := simd.LoadInt8x64Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x64())
+
+ default:
+ t.Errorf("Unknown method: Int8x64.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x64
@@ -2191,6 +2380,27 @@ func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whic
}
}
+func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x8
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x8Slice(v0)
+ vec1 := simd.LoadInt16x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x8())
+
+ default:
+ t.Errorf("Unknown method: Int16x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x8
@@ -2488,6 +2698,27 @@ func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi
}
}
+func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x16
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x16Slice(v0)
+ vec1 := simd.LoadInt16x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x16())
+
+ default:
+ t.Errorf("Unknown method: Int16x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x16
@@ -2767,6 +2998,27 @@ func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi
}
}
+func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x32
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x32Slice(v0)
+ vec1 := simd.LoadInt16x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x32())
+
+ default:
+ t.Errorf("Unknown method: Int16x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x32
@@ -3091,6 +3343,27 @@ func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int
}
}
+func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x4
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Int32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
@@ -3464,6 +3737,27 @@ func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []i
}
}
+func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x8
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Int32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
@@ -3810,6 +4104,27 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1
}
}
+func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x16
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Int32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
@@ -4111,6 +4426,27 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic
}
}
+func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x2
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Int64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
@@ -4363,6 +4699,27 @@ func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic
}
}
+func testInt64x4Mask64x4Int64x4(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x4
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Int64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
@@ -4615,6 +4972,27 @@ func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic
}
}
+func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x8
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Int64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8
@@ -4894,6 +5272,27 @@ func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2
}
}
+func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x16
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x16Slice(v0)
+ vec1 := simd.LoadInt8x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x16())
+
+ default:
+ t.Errorf("Unknown method: Uint8x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x16
@@ -5120,6 +5519,27 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v
}
}
+func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x32
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x32Slice(v0)
+ vec1 := simd.LoadInt8x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x32())
+
+ default:
+ t.Errorf("Unknown method: Uint8x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x32
@@ -5338,6 +5758,27 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v
}
}
+func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x64
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x64Slice(v0)
+ vec1 := simd.LoadInt8x64Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x64())
+
+ default:
+ t.Errorf("Unknown method: Uint8x64.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x64
@@ -5533,6 +5974,27 @@ func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, w
}
}
+func testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x8
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x8Slice(v0)
+ vec1 := simd.LoadInt16x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x8())
+
+ default:
+ t.Errorf("Unknown method: Uint16x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x8
@@ -5777,6 +6239,27 @@ func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16,
}
}
+func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x16
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x16Slice(v0)
+ vec1 := simd.LoadInt16x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x16())
+
+ default:
+ t.Errorf("Unknown method: Uint16x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x16
@@ -6009,6 +6492,27 @@ func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16,
}
}
+func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x32
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x32Slice(v0)
+ vec1 := simd.LoadInt16x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x32())
+
+ default:
+ t.Errorf("Unknown method: Uint16x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x32
@@ -6274,6 +6778,27 @@ func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w
}
}
+func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x4
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Uint32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
@@ -6588,6 +7113,27 @@ func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w
}
}
+func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x8
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Uint32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
@@ -6877,6 +7423,27 @@ func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32,
}
}
+func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x16
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Uint32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
@@ -7170,6 +7737,27 @@ func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w
}
}
+func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x2
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Uint64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
@@ -7414,6 +8002,27 @@ func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w
}
}
+func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x4
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Uint64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
@@ -7658,6 +8267,27 @@ func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w
}
}
+func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x8
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Uint64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8