aboutsummaryrefslogtreecommitdiff
path: root/test/codegen
diff options
context:
space:
mode:
Diffstat (limited to 'test/codegen')
-rw-r--r--test/codegen/simd.go42
1 file changed, 21 insertions, 21 deletions
diff --git a/test/codegen/simd.go b/test/codegen/simd.go
index 63d5bf757a..8f3a1a9f46 100644
--- a/test/codegen/simd.go
+++ b/test/codegen/simd.go
@@ -10,70 +10,70 @@
package codegen
-import "simd"
+import "simd/archsimd"
func vptest1() bool {
- v1 := simd.LoadUint64x2Slice([]uint64{0, 1})
- v2 := simd.LoadUint64x2Slice([]uint64{0, 0})
+ v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
+ v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
// amd64:`VPTEST\s(.*)(.*)$`
// amd64:`SETCS\s(.*)$`
return v1.AndNot(v2).IsZero()
}
func vptest2() bool {
- v1 := simd.LoadUint64x2Slice([]uint64{0, 1})
- v2 := simd.LoadUint64x2Slice([]uint64{0, 0})
+ v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
+ v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
// amd64:`VPTEST\s(.*)(.*)$`
// amd64:`SETEQ\s(.*)$`
return v1.And(v2).IsZero()
}
type Args2 struct {
- V0 simd.Uint8x32
- V1 simd.Uint8x32
+ V0 archsimd.Uint8x32
+ V1 archsimd.Uint8x32
x string
}
//go:noinline
-func simdStructNoSpill(a Args2) simd.Uint8x32 {
+func simdStructNoSpill(a Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
return a.V0.Xor(a.V1)
}
-func simdStructWrapperNoSpill(a Args2) simd.Uint8x32 {
+func simdStructWrapperNoSpill(a Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
a.x = "test"
return simdStructNoSpill(a)
}
//go:noinline
-func simdArrayNoSpill(a [1]Args2) simd.Uint8x32 {
+func simdArrayNoSpill(a [1]Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
return a[0].V0.Xor(a[0].V1)
}
-func simdArrayWrapperNoSpill(a [1]Args2) simd.Uint8x32 {
+func simdArrayWrapperNoSpill(a [1]Args2) archsimd.Uint8x32 {
// amd64:-`VMOVDQU\s.*$`
a[0].x = "test"
return simdArrayNoSpill(a)
}
-func simdFeatureGuardedMaskOpt() simd.Int16x16 {
- var x, y simd.Int16x16
- if simd.X86.AVX512() {
- mask := simd.Mask16x16FromBits(5)
+func simdFeatureGuardedMaskOpt() archsimd.Int16x16 {
+ var x, y archsimd.Int16x16
+ if archsimd.X86.AVX512() {
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Masked(mask) // amd64:`VPADDW.Z\s.*$`
}
- mask := simd.Mask16x16FromBits(5)
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Masked(mask) // amd64:`VPAND\s.*$`
}
-func simdMaskedMerge() simd.Int16x16 {
- var x, y simd.Int16x16
- if simd.X86.AVX512() {
- mask := simd.Mask16x16FromBits(5)
+func simdMaskedMerge() archsimd.Int16x16 {
+ var x, y archsimd.Int16x16
+ if archsimd.X86.AVX512() {
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Merge(x, mask) // amd64:-`VPBLENDVB\s.*$`
}
- mask := simd.Mask16x16FromBits(5)
+ mask := archsimd.Mask16x16FromBits(5)
return x.Add(y).Merge(x, mask) // amd64:`VPBLENDVB\s.*$`
}