; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI

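; These tests check that a multiply of zero/sign-extended i8 (or i4) data by
; small constants, feeding llvm.vector.reduce.add, is lowered to the VNNI
; dot-product instruction vpdpbusd whenever the operands fit its u8 x i8 ranges.

; The multiplier 128 does not fit in the signed-byte operand that the constant
; would occupy, so this case is expected to fall back to vpmaddwd.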
define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) {
; ALL-LABEL: mul_4xi8_zc_exceed:
; ALL: # %bb.0: # %entry
; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0]
; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: addl %edi, %eax
; ALL-NEXT: retq
entry:
  %0 = zext <4 x i8> %a to <4 x i32>
  %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128>
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

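; zc: %a is zero-extended and the constants 16, 1, 2 and 127 all fit in a
; signed byte, so the whole reduction should lower to a single vpdpbusd; the
; vpxor+vpblendw keep only the four input bytes in the low dword.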
define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) {
; AVXVNNI-LABEL: mul_4xi8_zc:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVXVNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVXVNNI-NEXT: vmovd %xmm1, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_zc:
; AVX512VNNI: # %bb.0: # %entry
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512VNNI-NEXT: vmovd %xmm1, %eax
; AVX512VNNI-NEXT: addl %edi, %eax
; AVX512VNNI-NEXT: vzeroupper
; AVX512VNNI-NEXT: retq
;
; AVX512VLVNNI-LABEL: mul_4xi8_zc:
; AVX512VLVNNI: # %bb.0: # %entry
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512VLVNNI-NEXT: vmovd %xmm1, %eax
; AVX512VLVNNI-NEXT: addl %edi, %eax
; AVX512VLVNNI-NEXT: retq
entry:
  %0 = zext <4 x i8> %a to <4 x i32>
  %1 = mul nsw <4 x i32> %0, <i32 16, i32 1, i32 2, i32 127>
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

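; Same pattern with i4 elements: the i4 values arrive widened in dword lanes,
; are packed back to bytes and masked to the low nibble before feeding
; vpdpbusd.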
define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) {
; AVXVNNI-LABEL: mul_4xi4_cz:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVXVNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVXVNNI-NEXT: vmovd %xmm1, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi4_cz:
; AVX512VNNI: # %bb.0: # %entry
; AVX512VNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512VNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512VNNI-NEXT: vmovd %xmm1, %eax
; AVX512VNNI-NEXT: addl %edi, %eax
; AVX512VNNI-NEXT: vzeroupper
; AVX512VNNI-NEXT: retq
;
; AVX512VLVNNI-LABEL: mul_4xi4_cz:
; AVX512VLVNNI: # %bb.0: # %entry
; AVX512VLVNNI-NEXT: vpmovdb %xmm0, %xmm0
; AVX512VLVNNI-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512VLVNNI-NEXT: vmovd %xmm1, %eax
; AVX512VLVNNI-NEXT: addl %edi, %eax
; AVX512VLVNNI-NEXT: retq
entry:
  %0 = zext <4 x i4> %a to <4 x i32>
  %1 = mul nsw <4 x i32> <i32 16, i32 1, i32 2, i32 127>, %0
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

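; cs: %a is sign-extended, so it must take vpdpbusd's signed input while the
; constants (16, 1, 2, 255, all valid unsigned bytes) take the unsigned input.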
define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) {
; AVXVNNI-LABEL: mul_4xi8_cs:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVXVNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1
; AVXVNNI-NEXT: vmovd %xmm1, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_cs:
; AVX512VNNI: # %bb.0: # %entry
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512VNNI-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VNNI-NEXT: vpdpbusd %zmm0, %zmm1, %zmm2
; AVX512VNNI-NEXT: vmovd %xmm2, %eax
; AVX512VNNI-NEXT: addl %edi, %eax
; AVX512VNNI-NEXT: vzeroupper
; AVX512VNNI-NEXT: retq
;
; AVX512VLVNNI-LABEL: mul_4xi8_cs:
; AVX512VLVNNI: # %bb.0: # %entry
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512VLVNNI-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VLVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLVNNI-NEXT: vpdpbusd %xmm0, %xmm1, %xmm2
; AVX512VLVNNI-NEXT: vmovd %xmm2, %eax
; AVX512VLVNNI-NEXT: addl %edi, %eax
; AVX512VLVNNI-NEXT: retq
entry:
  %0 = sext <4 x i8> %a to <4 x i32>
  %1 = mul nsw <4 x i32> <i32 16, i32 1, i32 2, i32 255>, %0
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

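; 256 does not fit in the unsigned-byte operand of vpdpbusd, so this case is
; expected to fall back to vpmaddwd as well.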
define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) {
; ALL-LABEL: mul_4xi8_cs_exceed:
; ALL: # %bb.0: # %entry
; ALL-NEXT: vpmovsxbd %xmm0, %xmm0
; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0]
; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: addl %edi, %eax
; ALL-NEXT: retq
entry:
  %0 = sext <4 x i8> %a to <4 x i32>
  %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 256>, %0
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

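; A full 16 x i8 dot product: a single vpdpbusd followed by a horizontal
; reduction of the four accumulator dwords.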
define i32 @mul_16xi8_zc(<16 x i8> %a, i32 %c) {
; AVXVNNI-LABEL: mul_16xi8_zc:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; AVXVNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vmovd %xmm0, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: retq
;
; AVX512VNNI-LABEL: mul_16xi8_zc:
; AVX512VNNI: # %bb.0: # %entry
; AVX512VNNI-NEXT: vmovdqa %xmm0, %xmm0
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512VNNI-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; AVX512VNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512VNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VNNI-NEXT: vmovd %xmm0, %eax
; AVX512VNNI-NEXT: addl %edi, %eax
; AVX512VNNI-NEXT: vzeroupper
; AVX512VNNI-NEXT: retq
;
; AVX512VLVNNI-LABEL: mul_16xi8_zc:
; AVX512VLVNNI: # %bb.0: # %entry
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
; AVX512VLVNNI-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; AVX512VLVNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VLVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512VLVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VLVNNI-NEXT: vmovd %xmm0, %eax
; AVX512VLVNNI-NEXT: addl %edi, %eax
; AVX512VLVNNI-NEXT: retq
entry:
  %0 = zext <16 x i8> %a to <16 x i32>
  %1 = mul nsw <16 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

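; 32 x i8: a 256-bit vpdpbusd followed by reduction of the eight accumulator
; dwords.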
define i32 @mul_32xi8_zc(<32 x i8> %a, i32 %c) {
; AVXVNNI-LABEL: mul_32xi8_zc:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
; AVXVNNI-NEXT: vextracti128 $1, %ymm1, %xmm0
; AVXVNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vmovd %xmm0, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: vzeroupper
; AVXVNNI-NEXT: retq
;
; AVX512VNNI-LABEL: mul_32xi8_zc:
; AVX512VNNI: # %bb.0: # %entry
; AVX512VNNI-NEXT: vmovdqa %ymm0, %ymm0
; AVX512VNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512VNNI-NEXT: vextracti128 $1, %ymm1, %xmm0
; AVX512VNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512VNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512VNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VNNI-NEXT: vmovd %xmm0, %eax
; AVX512VNNI-NEXT: addl %edi, %eax
; AVX512VNNI-NEXT: vzeroupper
; AVX512VNNI-NEXT: retq
;
; AVX512VLVNNI-LABEL: mul_32xi8_zc:
; AVX512VLVNNI: # %bb.0: # %entry
; AVX512VLVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVNNI-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
; AVX512VLVNNI-NEXT: vextracti128 $1, %ymm1, %xmm0
; AVX512VLVNNI-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VLVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512VLVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VLVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512VLVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VLVNNI-NEXT: vmovd %xmm0, %eax
; AVX512VLVNNI-NEXT: addl %edi, %eax
; AVX512VLVNNI-NEXT: vzeroupper
; AVX512VLVNNI-NEXT: retq
entry:
  %0 = zext <32 x i8> %a to <32 x i32>
  %1 = mul nsw <32 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

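; 64 x i8: without 512-bit vectors the input is processed as two 256-bit
; halves with separate accumulators; with AVX512 a single 512-bit vpdpbusd is
; used.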
define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
; AVXVNNI-LABEL: mul_64xi8_zc:
; AVXVNNI: # %bb.0: # %entry
; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0
; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXVNNI-NEXT: vmovd %xmm0, %eax
; AVXVNNI-NEXT: addl %edi, %eax
; AVXVNNI-NEXT: vzeroupper
; AVXVNNI-NEXT: retq
;
; AVX512-LABEL: mul_64xi8_zc:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; AVX512-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: addl %edi, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = zext <64 x i8> %a to <64 x i32>
  %1 = mul nsw <64 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
  %2 = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %1)
  %op.extra = add nsw i32 %2, %c
  ret i32 %op.extra
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)
declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>)