; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve -o - | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
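
; In the IR below, each complex multiply is expressed on deinterleaved lanes
; (deinterleave2 result 0 carrying the real parts, result 1 the imaginary parts):
;   re(x*y) = x.re*y.re - x.im*y.im
;   im(x*y) = x.re*y.im + x.im*y.re
; In this first case the products feed a plain addend %c rather than another
; complex product, and the asserted codegen stays in deinterleaved form
; (uzp1/uzp2, fmul, fmad/fnmsb) and only re-interleaves with zip1/zip2 at the
; end instead of forming fcmla.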
; a * b + c
define <vscale x 4 x double> @mull_add(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c) {
; CHECK-LABEL: mull_add:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uzp1 z6.d, z0.d, z1.d
; CHECK-NEXT:    uzp2 z7.d, z2.d, z3.d
; CHECK-NEXT:    uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uzp1 z1.d, z2.d, z3.d
; CHECK-NEXT:    fmul z2.d, z6.d, z7.d
; CHECK-NEXT:    fmul z3.d, z0.d, z7.d
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    fnmsb z1.d, p0/m, z6.d, z3.d
; CHECK-NEXT:    uzp2 z2.d, z4.d, z5.d
; CHECK-NEXT:    uzp1 z3.d, z4.d, z5.d
; CHECK-NEXT:    fadd z2.d, z0.d, z2.d
; CHECK-NEXT:    fadd z1.d, z3.d, z1.d
; CHECK-NEXT:    zip1 z0.d, z1.d, z2.d
; CHECK-NEXT:    zip2 z1.d, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec29 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec29, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec29, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec31 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec31, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec31, 1
  %12 = fadd contract <vscale x 2 x double> %10, %9
  %13 = fadd contract <vscale x 2 x double> %6, %11
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %12, <vscale x 2 x double> %13)
  ret <vscale x 4 x double> %interleaved.vec
}
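
; Note: with both operands of the addition being full complex products, the
; asserted codegen below matches each product to a pair of fcmla instructions
; (rotations #0 and #90) and combines the two accumulators with fadd.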
; a * b + c * d
define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_add_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #90
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #90
; CHECK-NEXT:    fadd z0.d, z25.d, z27.d
; CHECK-NEXT:    fadd z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec52 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec54 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 1
  %strided.vec56 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fadd contract <vscale x 2 x double> %15, %14
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fsub contract <vscale x 2 x double> %17, %18
  %20 = fadd contract <vscale x 2 x double> %9, %19
  %21 = fadd contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}
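
; Note: same fcmla #0/#90 pairing as above; only the final combine changes,
; with fsub subtracting the second product's accumulators.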
; a * b - c * d
define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_sub_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #90
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #90
; CHECK-NEXT:    fsub z0.d, z25.d, z27.d
; CHECK-NEXT:    fsub z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec52 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec54 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 1
  %strided.vec56 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fadd contract <vscale x 2 x double> %15, %14
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fsub contract <vscale x 2 x double> %17, %18
  %20 = fsub contract <vscale x 2 x double> %9, %19
  %21 = fsub contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}
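
; Note: the conjugation of %c is visible in the asserted code as swapped fcmla
; multiplicands and a #0/#270 rotation pair (rather than #0/#90) for the second
; product's accumulators.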
; a * b + conj(c) * d
define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_conj_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z4.d, z6.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z5.d, z7.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z4.d, z6.d, #270
; CHECK-NEXT:    fcmla z24.d, p0/m, z5.d, z7.d, #270
; CHECK-NEXT:    fadd z0.d, z25.d, z27.d
; CHECK-NEXT:    fadd z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec60 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec60, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec60, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec62 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec62, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec62, 1
  %strided.vec64 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec64, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec64, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fsub contract <vscale x 2 x double> %14, %15
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fadd contract <vscale x 2 x double> %17, %18
  %20 = fadd contract <vscale x 2 x double> %9, %19
  %21 = fadd contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}
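
; Note: here the IR builds what is effectively a 90-degree rotation of %c using
; llvm.copysign on zero (signed zeros stand in for the negated lane), and the
; asserted codegen does not use fcmla at all: it stays deinterleaved and lowers
; the rotation with sign-bit and/orr masking plus fmul/fmla/fnmsb/fnmls before
; re-interleaving with zip1/zip2.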
; a * b + rot(c) * d
define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_add_rot_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uzp2 z24.d, z4.d, z5.d
; CHECK-NEXT:    mov z25.d, #0 // =0x0
; CHECK-NEXT:    uzp1 z4.d, z4.d, z5.d
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    and z25.d, z25.d, #0x7fffffffffffffff
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    and z26.d, z26.d, #0x8000000000000000
; CHECK-NEXT:    orr z5.d, z25.d, z26.d
; CHECK-NEXT:    fadd z5.d, z4.d, z5.d
; CHECK-NEXT:    and z4.d, z4.d, #0x8000000000000000
; CHECK-NEXT:    orr z4.d, z25.d, z4.d
; CHECK-NEXT:    uzp2 z25.d, z0.d, z1.d
; CHECK-NEXT:    uzp1 z0.d, z0.d, z1.d
; CHECK-NEXT:    uzp2 z1.d, z2.d, z3.d
; CHECK-NEXT:    uzp1 z2.d, z2.d, z3.d
; CHECK-NEXT:    fsub z4.d, z4.d, z24.d
; CHECK-NEXT:    uzp2 z24.d, z6.d, z7.d
; CHECK-NEXT:    uzp1 z6.d, z6.d, z7.d
; CHECK-NEXT:    fmul z3.d, z0.d, z1.d
; CHECK-NEXT:    fmul z1.d, z25.d, z1.d
; CHECK-NEXT:    fmul z7.d, z4.d, z24.d
; CHECK-NEXT:    fmul z24.d, z5.d, z24.d
; CHECK-NEXT:    fmla z3.d, p0/m, z25.d, z2.d
; CHECK-NEXT:    fnmsb z0.d, p0/m, z2.d, z1.d
; CHECK-NEXT:    movprfx z1, z7
; CHECK-NEXT:    fmla z1.d, p0/m, z6.d, z5.d
; CHECK-NEXT:    movprfx z2, z24
; CHECK-NEXT:    fnmls z2.d, p0/m, z4.d, z6.d
; CHECK-NEXT:    fadd z2.d, z0.d, z2.d
; CHECK-NEXT:    fadd z1.d, z3.d, z1.d
; CHECK-NEXT:    zip1 z0.d, z2.d, z1.d
; CHECK-NEXT:    zip2 z1.d, z2.d, z1.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec78 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec78, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec78, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec80 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec80, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec80, 1
  %12 = tail call contract <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> %11)
  %13 = fadd contract <vscale x 2 x double> %10, %12
  %14 = tail call contract <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> %10)
  %15 = fsub contract <vscale x 2 x double> %14, %11
  %strided.vec82 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %16 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec82, 0
  %17 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec82, 1
  %18 = fmul contract <vscale x 2 x double> %15, %17
  %19 = fmul contract <vscale x 2 x double> %16, %13
  %20 = fadd contract <vscale x 2 x double> %19, %18
  %21 = fmul contract <vscale x 2 x double> %15, %16
  %22 = fmul contract <vscale x 2 x double> %13, %17
  %23 = fsub contract <vscale x 2 x double> %21, %22
  %24 = fadd contract <vscale x 2 x double> %9, %23
  %25 = fadd contract <vscale x 2 x double> %6, %20
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %24, <vscale x 2 x double> %25)
  ret <vscale x 4 x double> %interleaved.vec
}

declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)
declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)