; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST
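; The SSE run exercises the fixed-shift lowerings; the two AVX2 runs share the
; AVX prefix and only diverge (AVX2-SLOW vs. AVX2-FAST) where the
; +fast-variable-shuffle attribute changes the shuffle chosen for truncation.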

; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}
; fold (srl x, c >= size(x)) -> undef
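; In IR, shifting an i32 lane by 33 (or anything >= 32) produces poison, so the
; combiner may replace the whole node with undef and the checks below expect no
; code beyond the return.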
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
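; A shift by zero is the identity, so %x should be returned untouched.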
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_by_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
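; Known-bits reasoning: the 'and' below clears all but the low four bits, and a
; right shift by four (or more) then discards every bit that could still be set.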
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
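; e.g. (x lshr 2) lshr 4 becomes x lshr 6, which lowers to the single
; psrld/vpsrld $6 checked below.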
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $10, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $8, %xmm1
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
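; Here the summed shift amounts (16+20, 17+25, ...) are all >= 32, so the
; combined shift is known to produce zero.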
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
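; e.g. trunc(x lshr 32) lshr 16 becomes trunc(x lshr 48); the SSE checks below
; show a single psrlq $48 per 128-bit half before the pack.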
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlq $48, %xmm1
; SSE-NEXT:    psrlq $48, %xmm0
; SSE-NEXT:    packusdw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $35, %xmm2
; SSE-NEXT:    psrlq $34, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlq $33, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $19, %xmm1
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    psrld $17, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $18, %xmm1
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-SLOW-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
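; The i64 shift by 48+ leaves at most 16 significant bits, and the i32 shift by
; 24+ discards all of them. The splat-constant case folds outright; per the
; checks below, the non-uniform variant still folds on SSE but not on AVX.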
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
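; e.g. (x shl 2) lshr 2 becomes x & 0x3fffffff, matching the 1073741823 splat
; mask in the checks below.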
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
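; An arithmetic shift replicates the sign bit, so shifting the result right by
; 31 (bitwidth - 1) only ever extracts the original sign bit of X and the
; intermediate ashr drops out.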
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
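; For i32, (ctlz x) lshr 5 is 1 only when x is zero (ctlz(0) = 32), so with x
; masked down to a single candidate bit the sequence reduces to isolating that
; bit and xor'ing it with 1, as in the pand/psrld/pxor checks below.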
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pxor {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $4, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT:    movdqa %xmm2, %xmm3
; SSE-NEXT:    pshufb %xmm0, %xmm3
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrlw $4, %xmm1
; SSE-NEXT:    pxor %xmm4, %xmm4
; SSE-NEXT:    pshufb %xmm1, %xmm2
; SSE-NEXT:    pcmpeqb %xmm4, %xmm1
; SSE-NEXT:    pand %xmm3, %xmm1
; SSE-NEXT:    paddb %xmm2, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pcmpeqb %xmm4, %xmm2
; SSE-NEXT:    psrlw $8, %xmm2
; SSE-NEXT:    pand %xmm1, %xmm2
; SSE-NEXT:    psrlw $8, %xmm1
; SSE-NEXT:    paddw %xmm2, %xmm1
; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    psrld $16, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    psrld $5, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm3
; AVX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX-NEXT:    vpcmpeqb %xmm4, %xmm3, %xmm5
; AVX-NEXT:    vpand %xmm5, %xmm2, %xmm2
; AVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm4, %xmm0, %xmm2
; AVX-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm2
; AVX-NEXT:    vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $5, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
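; Performing the 'and' at i32 lets the truncated amounts feed vpsrlvd directly
; on AVX2; only the SSE path has to expand the variable shift per element.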
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm2, %xmm3
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm5
; SSE-NEXT:    psrld %xmm4, %xmm5
; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_and:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: combine_vec_lshr_trunc_and:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FAST-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}