about summary refs log tree commit diff
path: root/test/codegen
diff options
context:
space:
mode:
authorDavid Chase <drchase@google.com>2025-12-08 13:24:12 -0500
committerDavid Chase <drchase@google.com>2025-12-08 13:57:44 -0800
commit144cf17d2c444a530d7c08c5870dc8e70bec2c72 (patch)
treef05f2d2883dd9914dee04552478b99cc9ecb791e /test/codegen
parent3417b48b17d01cf170317d679aef10984cc1a4d0 (diff)
downloadgo-144cf17d2c444a530d7c08c5870dc8e70bec2c72.tar.xz
[dev.simd] simd, cmd/compile: move "simd" to "simd/archsimd"
Also removes a few leftover TODOs and scraps of commented-out code from simd development. Updated etetest.sh to make it behave whether amd64 implies the experiment, or not. Fixes #76473. Change-Id: I6d9792214d7f514cb90c21b101dbf7d07c1d0e55 Reviewed-on: https://go-review.googlesource.com/c/go/+/728220 TryBot-Bypass: David Chase <drchase@google.com> Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'test/codegen')
-rw-r--r--test/codegen/simd.go42
1 file changed, 21 insertions, 21 deletions
diff --git a/test/codegen/simd.go b/test/codegen/simd.go
index 63d5bf757a..8f3a1a9f46 100644
--- a/test/codegen/simd.go
+++ b/test/codegen/simd.go
@@ -10,70 +10,70 @@
package codegen
-import "simd"
+import "simd/archsimd"
func vptest1() bool {
- v1 := simd.LoadUint64x2Slice([]uint64{0, 1})
- v2 := simd.LoadUint64x2Slice([]uint64{0, 0})
+ v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
+ v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
// amd64:`VPTEST\s(.*)(.*)$`
// amd64:`SETCS\s(.*)$`
return v1.AndNot(v2).IsZero()
}
func vptest2() bool {
- v1 := simd.LoadUint64x2Slice([]uint64{0, 1})
- v2 := simd.LoadUint64x2Slice([]uint64{0, 0})
+ v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
+ v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
// amd64:`VPTEST\s(.*)(.*)$`
// amd64:`SETEQ\s(.*)$`
return v1.And(v2).IsZero()
}
type Args2 struct {
- V0 simd.Uint8x32
- V1 simd.Uint8x32
+ V0 archsimd.Uint8x32
+ V1 archsimd.Uint8x32
x string
}
//go:noinline
-func simdStructNoSpill(a Args2) simd.Uint8x32 {
+func simdStructNoSpill(a Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
return a.V0.Xor(a.V1)
}
-func simdStructWrapperNoSpill(a Args2) simd.Uint8x32 {
+func simdStructWrapperNoSpill(a Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
a.x = "test"
return simdStructNoSpill(a)
}
//go:noinline
-func simdArrayNoSpill(a [1]Args2) simd.Uint8x32 {
+func simdArrayNoSpill(a [1]Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
return a[0].V0.Xor(a[0].V1)
}
-func simdArrayWrapperNoSpill(a [1]Args2) simd.Uint8x32 {
+func simdArrayWrapperNoSpill(a [1]Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
a[0].x = "test"
return simdArrayNoSpill(a)
}
-func simdFeatureGuardedMaskOpt() simd.Int16x16 {
- var x, y simd.Int16x16
- if simd.X86.AVX512() {
- mask := simd.Mask16x16FromBits(5)
+func simdFeatureGuardedMaskOpt() archsimd.Int16x16 {
+ var x, y archsimd.Int16x16
+ if archsimd.X86.AVX512() {
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Masked(mask) // amd64:`VPADDW.Z\s.*$`
}
- mask := simd.Mask16x16FromBits(5)
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Masked(mask) // amd64:`VPAND\s.*$`
}
-func simdMaskedMerge() simd.Int16x16 {
- var x, y simd.Int16x16
- if simd.X86.AVX512() {
- mask := simd.Mask16x16FromBits(5)
+func simdMaskedMerge() archsimd.Int16x16 {
+ var x, y archsimd.Int16x16
+ if archsimd.X86.AVX512() {
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Merge(x, mask) // amd64:-`VPBLENDVB\s.*$`
}
- mask := simd.Mask16x16FromBits(5)
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Merge(x, mask) // amd64:`VPBLENDVB\s.*$`
}