// asmcheck

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// These tests check the code generation of SIMD peephole optimizations.

//go:build goexperiment.simd && amd64

package codegen

import (
	"math"
	"simd/archsimd"
)

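// vptest1 checks that AndNot followed by IsZero folds into a single VPTEST.
// VPTEST sets CF when the AND-NOT of its operands is zero, so the result is
// read back with SETCS.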
func vptest1() bool {
	v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
	v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
	// amd64:`VPTEST (.*)(.*)$`
	// amd64:`SETCS (.*)$`
	return v1.AndNot(v2).IsZero()
}

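// vptest2 checks that And followed by IsZero folds into a single VPTEST.
// VPTEST sets ZF when the AND of its operands is zero, so the result is
// read back with SETEQ.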
func vptest2() bool {
	v1 := archsimd.LoadUint64x2Slice([]uint64{0, 1})
	v2 := archsimd.LoadUint64x2Slice([]uint64{0, 0})
	// amd64:`VPTEST (.*)(.*)$`
	// amd64:`SETEQ (.*)$`
	return v1.And(v2).IsZero()
}

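// Args2 mixes two SIMD vectors with a non-SIMD field so the tests below can
// check that the vectors stay in registers when passed inside aggregates.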
type Args2 struct {
	V0 archsimd.Uint8x32
	V1 archsimd.Uint8x32
	x  string
}

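// simdStructNoSpill and its wrapper check that SIMD values passed inside a
// struct stay in registers: no VMOVDQU copy may be emitted.
//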
//go:noinline
func simdStructNoSpill(a Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU .*$`
	return a.V0.Xor(a.V1)
}

func simdStructWrapperNoSpill(a Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU .*$`
	a.x = "test"
	return simdStructNoSpill(a)
}

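// simdArrayNoSpill and its wrapper repeat the struct test for an array of
// structs: the vector elements must not be copied with VMOVDQU.
//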
//go:noinline
func simdArrayNoSpill(a [1]Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU .*$`
	return a[0].V0.Xor(a[0].V1)
}

func simdArrayWrapperNoSpill(a [1]Args2) archsimd.Uint8x32 {
	// amd64:-`VMOVDQU .*$`
	a[0].x = "test"
	return simdArrayNoSpill(a)
}

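// simdFeatureGuardedMaskOpt checks that inside an AVX512 feature guard a
// masked Add compiles to the zeroing-masked instruction form (VPADDW.Z),
// while the fallback path applies the mask with a separate VPAND.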
func simdFeatureGuardedMaskOpt() archsimd.Int16x16 {
	var x, y archsimd.Int16x16
	if archsimd.X86.AVX512() {
		mask := archsimd.Mask16x16FromBits(5)
		return x.Add(y).Masked(mask) // amd64:`VPADDW.Z .*$`
	}
	mask := archsimd.Mask16x16FromBits(5)
	return x.Add(y).Masked(mask) // amd64:`VPAND .*$`
}

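// simdMaskedMerge checks that inside an AVX512 feature guard Merge avoids a
// VPBLENDVB blend (merge masking is used instead), while the fallback path
// still lowers to VPBLENDVB.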
func simdMaskedMerge() archsimd.Int16x16 {
	var x, y archsimd.Int16x16
	if archsimd.X86.AVX512() {
		mask := archsimd.Mask16x16FromBits(5)
		return x.Add(y).Merge(x, mask) // amd64:-`VPBLENDVB .*$`
	}
	mask := archsimd.Mask16x16FromBits(5)
	return x.Add(y).Merge(x, mask) // amd64:`VPBLENDVB .*$`
}

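// floats64s seeds the IsNaN tests with NaNs at known lanes; sinkInt64s makes
// the results observable so the computations cannot be eliminated as dead.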
var nan = math.NaN()
var floats64s = []float64{0, 1, 2, nan, 4, nan, 6, 7, 8, 9, 10, 11, nan, 13, 14, 15}
var sinkInt64s = make([]int64, 100)

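// simdIsNaN checks that IsNaN(x).Or(IsNaN(y)) is combined into a single
// unordered compare (VCMPPD with predicate 3, UNORD), eliminating the VPOR.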
func simdIsNaN() {
	x := archsimd.LoadFloat64x4Slice(floats64s)
	y := archsimd.LoadFloat64x4Slice(floats64s[4:])
	a := x.IsNaN()
	b := y.IsNaN()
	// amd64:"VCMPPD [$]3," -"VPOR"
	c := a.Or(b)
	c.ToInt64x4().StoreSlice(sinkInt64s)
}

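// simdIsNaN512 repeats the IsNaN fold test for 512-bit vectors.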
func simdIsNaN512() {
	x := archsimd.LoadFloat64x8Slice(floats64s)
	y := archsimd.LoadFloat64x8Slice(floats64s[8:])
	a := x.IsNaN()
	b := y.IsNaN()
	// amd64:"VCMPPD [$]3," -"VPOR"
	c := a.Or(b)
	c.ToInt64x8().StoreSlice(sinkInt64s)
}

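// sftImmVPSRL checks that a shift by a constant count uses the immediate
// form of the instruction (VPSRLD $1) rather than a variable-count shift.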
func sftImmVPSRL() archsimd.Uint32x4 {
	var x archsimd.Uint32x4
	// amd64:`VPSRLD \$1, .*$`
	return x.ShiftAllRight(1)
}