; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX

; fold (sub x, 0) -> x
define <4 x i32> @combine_vec_sub_zero(<4 x i32> %a) {
; CHECK-LABEL: combine_vec_sub_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = sub <4 x i32> %a, zeroinitializer
  ret <4 x i32> %1
}

; fold (sub x, x) -> 0
define <4 x i32> @combine_vec_sub_self(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_self:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_self:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> %a, %a
  ret <4 x i32> %1
}
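; (xorps of a register with itself is the standard x86 zeroing idiom, so the
; all-zeros result needs no constant materialization or load.)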

; fold (sub x, c) -> (add x, -c)
define <4 x i32> @combine_vec_sub_constant(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sub_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %1
}
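; (Note: the negated-constant 'add' form would still need a constant-pool
; operand for a vector, so presumably selection keeps a single load-folded
; psubd/vpsubd here rather than materializing -c.)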

; Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
define <4 x i32> @combine_vec_sub_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sub_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  ret <4 x i32> %1
}
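; (pcmpeqd/vpcmpeqd of a register with itself sets all bits, materializing the
; -1 splat without a load; xor with all-ones is the bitwise NOT.)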

; fold A-(A-B) -> B
define <4 x i32> @combine_vec_sub_sub(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_sub_sub:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_sub:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> %a, %b
  %2 = sub <4 x i32> %a, %1
  ret <4 x i32> %2
}

; fold (A+B)-A -> B
define <4 x i32> @combine_vec_sub_add0(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_sub_add0:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_add0:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %a, %b
  %2 = sub <4 x i32> %1, %a
  ret <4 x i32> %2
}

; fold (A+B)-B -> A
define <4 x i32> @combine_vec_sub_add1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: combine_vec_sub_add1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = add <4 x i32> %a, %b
  %2 = sub <4 x i32> %1, %b
  ret <4 x i32> %2
}

; fold C2-(A+C1) -> (C2-C1)-A
define <4 x i32> @combine_vec_sub_constant_add(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_constant_add:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_constant_add:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %a, <i32 0, i32 1, i32 2, i32 3>
  %2 = sub <4 x i32> <i32 3, i32 2, i32 1, i32 0>, %1
  ret <4 x i32> %2
}
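; (The folded constant is C2-C1 = <3,2,1,0> - <0,1,2,3> = <3,1,-1,-3>, printed
; unsigned as [3,1,4294967295,4294967293] above.)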

; fold ((A+(B+C))-B) -> A+C
define <4 x i32> @combine_vec_sub_add_add(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_add_add:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_add_add:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %b, %c
  %2 = add <4 x i32> %a, %1
  %3 = sub <4 x i32> %2, %b
  ret <4 x i32> %3
}

; fold ((A+(B-C))-B) -> A-C
define <4 x i32> @combine_vec_sub_add_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_add_sub:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_add_sub:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> %b, %c
  %2 = add <4 x i32> %a, %1
  %3 = sub <4 x i32> %2, %b
  ret <4 x i32> %3
}

; fold ((A-(B-C))-C) -> A-B
define <4 x i32> @combine_vec_sub_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_sub_sub:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_sub_sub:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sub <4 x i32> %b, %c
  %2 = sub <4 x i32> %a, %1
  %3 = sub <4 x i32> %2, %c
  ret <4 x i32> %3
}

; fold undef-A -> undef
define <4 x i32> @combine_vec_sub_undef0(<4 x i32> %a) {
; CHECK-LABEL: combine_vec_sub_undef0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = sub <4 x i32> undef, %a
  ret <4 x i32> %1
}

; fold A-undef -> undef
define <4 x i32> @combine_vec_sub_undef1(<4 x i32> %a) {
; CHECK-LABEL: combine_vec_sub_undef1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = sub <4 x i32> %a, undef
  ret <4 x i32> %1
}

; sub X, (sext Y i1) -> add X, (and Y 1)
define <4 x i32> @combine_vec_add_sext(<4 x i32> %x, <4 x i1> %y) {
; SSE-LABEL: combine_vec_add_sext:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_add_sext:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sext <4 x i1> %y to <4 x i32>
  %2 = sub <4 x i32> %x, %1
  ret <4 x i32> %2
}
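; (A sext'd i1 is 0 or -1, and x - (-1) == x + 1, so subtracting the
; sign-extended mask is equivalent to adding (y & 1).)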

; sub X, (sextinreg Y i1) -> add X, (and Y 1)
define <4 x i32> @combine_vec_sub_sextinreg(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_sub_sextinreg:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_sextinreg:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %y, <i32 31, i32 31, i32 31, i32 31>
  %2 = ashr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  %3 = sub <4 x i32> %x, %2
  ret <4 x i32> %3
}

; sub C1, (xor X, C2) -> add (xor X, ~C2), C1+1
define i32 @combine_sub_xor_consts(i32 %x) {
; CHECK-LABEL: combine_sub_xor_consts:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
; CHECK-NEXT:    xorl $-32, %edi
; CHECK-NEXT:    leal 33(%rdi), %eax
; CHECK-NEXT:    retq
  %xor = xor i32 %x, 31
  %sub = sub i32 32, %xor
  ret i32 %sub
}
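; (Worked math: 32 - (x ^ 31) == (x ^ ~31) + 33 == (x ^ -32) + 33, which is
; exactly the xorl $-32 plus leal 33(%rdi) checked above.)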

define <4 x i32> @combine_vec_sub_xor_consts(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sub_xor_consts:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sub_xor_consts:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %xor = xor <4 x i32> %x, <i32 28, i32 29, i32 -1, i32 -31>
  %sub = sub <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %xor
  ret <4 x i32> %sub
}

define <4 x i32> @combine_vec_neg_xor_consts(<4 x i32> %x) {
; SSE-LABEL: combine_vec_neg_xor_consts:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_neg_xor_consts:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %xor = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %sub = sub <4 x i32> zeroinitializer, %xor
  ret <4 x i32> %sub
}
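; (0 - (x ^ -1) == -(~x) == x + 1; subtracting the pcmpeqd all-ones value
; computes x - (-1), i.e. the increment, with no constant load.)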

; With AVX, this could use broadcast (an extra load) and
; load-folded 'add', but currently we favor the virtually
; free pcmpeq instruction.

define void @PR52032_oneuse_constant(ptr %p) {
; SSE-LABEL: PR52032_oneuse_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqu 16(%rdi), %xmm1
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    psubd %xmm2, %xmm1
; SSE-NEXT:    psubd %xmm2, %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    movdqu %xmm1, 16(%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR52032_oneuse_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqu (%rdi), %ymm0
; AVX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %i3 = load <8 x i32>, ptr %p, align 4
  %i4 = add nsw <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  store <8 x i32> %i4, ptr %p, align 4
  ret void
}
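; (Adding splat(1) is rewritten as subtracting splat(-1), so the pcmpeqd
; all-ones idiom supplies the constant and no broadcast load is needed.)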

; With AVX, we don't transform 'add' to 'sub' because that prevents load folding.
; With SSE, we do it because we can't load fold the other op without overwriting the constant op.

define void @PR52032(ptr %p) {
; SSE-LABEL: PR52032:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    movdqu (%rdi), %xmm1
; SSE-NEXT:    movdqu 16(%rdi), %xmm2
; SSE-NEXT:    movdqu 32(%rdi), %xmm3
; SSE-NEXT:    movdqu 48(%rdi), %xmm4
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqu %xmm1, (%rdi)
; SSE-NEXT:    movdqu %xmm2, 16(%rdi)
; SSE-NEXT:    psubd %xmm0, %xmm4
; SSE-NEXT:    psubd %xmm0, %xmm3
; SSE-NEXT:    movdqu %xmm3, 32(%rdi)
; SSE-NEXT:    movdqu %xmm4, 48(%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR52032:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
; AVX-NEXT:    vpaddd (%rdi), %ymm0, %ymm1
; AVX-NEXT:    vmovdqu %ymm1, (%rdi)
; AVX-NEXT:    vpaddd 32(%rdi), %ymm0, %ymm0
; AVX-NEXT:    vmovdqu %ymm0, 32(%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %i3 = load <8 x i32>, ptr %p, align 4
  %i4 = add nsw <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  store <8 x i32> %i4, ptr %p, align 4
  %p2 = getelementptr inbounds <8 x i32>, ptr %p, i64 1
  %i8 = load <8 x i32>, ptr %p2, align 4
  %i9 = add nsw <8 x i32> %i8, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  store <8 x i32> %i9, ptr %p2, align 4
  ret void
}

; Same as above, but 128-bit ops:
; With AVX, we don't transform 'add' to 'sub' because that prevents load folding.
; With SSE, we do it because we can't load fold the other op without overwriting the constant op.

define void @PR52032_2(ptr %p) {
; SSE-LABEL: PR52032_2:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    movdqu (%rdi), %xmm1
; SSE-NEXT:    movdqu 16(%rdi), %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqu %xmm1, (%rdi)
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    movdqu %xmm2, 16(%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR52032_2:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
; AVX-NEXT:    vpaddd (%rdi), %xmm0, %xmm1
; AVX-NEXT:    vmovdqu %xmm1, (%rdi)
; AVX-NEXT:    vpaddd 16(%rdi), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, 16(%rdi)
; AVX-NEXT:    retq
  %i3 = load <4 x i32>, ptr %p, align 4
  %i4 = add nsw <4 x i32> %i3, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i4, ptr %p, align 4
  %p2 = getelementptr inbounds <4 x i32>, ptr %p, i64 1
  %i8 = load <4 x i32>, ptr %p2, align 4
  %i9 = add nsw <4 x i32> %i8, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i9, ptr %p2, align 4
  ret void
}

; If we are starting with a 'sub', it is always better to do the transform.

define void @PR52032_3(ptr %p) {
; SSE-LABEL: PR52032_3:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    movdqu (%rdi), %xmm1
; SSE-NEXT:    movdqu 16(%rdi), %xmm2
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    movdqu %xmm1, (%rdi)
; SSE-NEXT:    paddd %xmm0, %xmm2
; SSE-NEXT:    movdqu %xmm2, 16(%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR52032_3:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vpaddd (%rdi), %xmm0, %xmm1
; AVX-NEXT:    vmovdqu %xmm1, (%rdi)
; AVX-NEXT:    vpaddd 16(%rdi), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, 16(%rdi)
; AVX-NEXT:    retq
  %i3 = load <4 x i32>, ptr %p, align 4
  %i4 = sub nsw <4 x i32> %i3, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i4, ptr %p, align 4
  %p2 = getelementptr inbounds <4 x i32>, ptr %p, i64 1
  %i8 = load <4 x i32>, ptr %p2, align 4
  %i9 = sub nsw <4 x i32> %i8, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i9, ptr %p2, align 4
  ret void
}

; If there's no chance of profitable load folding (because of extra uses), we convert 'add' to 'sub'.

define void @PR52032_4(ptr %p, ptr %q) {
; SSE-LABEL: PR52032_4:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqa %xmm0, (%rsi)
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    movdqu 16(%rdi), %xmm0
; SSE-NEXT:    movdqa %xmm0, 16(%rsi)
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR52032_4:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vmovdqa %xmm0, (%rsi)
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    vmovdqu 16(%rdi), %xmm0
; AVX-NEXT:    vmovdqa %xmm0, 16(%rsi)
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, 16(%rdi)
; AVX-NEXT:    retq
  %i3 = load <4 x i32>, ptr %p, align 4
  store <4 x i32> %i3, ptr %q
  %i4 = add nsw <4 x i32> %i3, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i4, ptr %p, align 4
  %p2 = getelementptr inbounds <4 x i32>, ptr %p, i64 1
  %q2 = getelementptr inbounds <4 x i32>, ptr %q, i64 1
  %i8 = load <4 x i32>, ptr %p2, align 4
  store <4 x i32> %i8, ptr %q2
  %i9 = add nsw <4 x i32> %i8, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %i9, ptr %p2, align 4
  ret void
}