; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64

; widening shuffle of v3float and then an add
define void @shuf(ptr %dst.addr, <3 x float> %src1, <3 x float> %src2) nounwind {
; X86-LABEL: shuf:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addps %xmm1, %xmm0
; X86-NEXT: extractps $2, %xmm0, 8(%eax)
; X86-NEXT: extractps $1, %xmm0, 4(%eax)
; X86-NEXT: movss %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf:
; X64: # %bb.0: # %entry
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: extractps $2, %xmm0, 8(%rdi)
; X64-NEXT: movlps %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> <i32 0, i32 1, i32 2>
  %val = fadd <3 x float> %x, %src2
  store <3 x float> %val, ptr %dst.addr
  ret void
}

; widening shuffle of v3float with a different mask and then an add
define void @shuf2(ptr %dst.addr, <3 x float> %src1, <3 x float> %src2) nounwind {
; X86-LABEL: shuf2:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X86-NEXT: addps %xmm1, %xmm0
; X86-NEXT: extractps $2, %xmm0, 8(%eax)
; X86-NEXT: extractps $1, %xmm0, 4(%eax)
; X86-NEXT: movss %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf2:
; X64: # %bb.0: # %entry
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: extractps $2, %xmm0, 8(%rdi)
; X64-NEXT: movlps %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> <i32 0, i32 4, i32 2>
  %val = fadd <3 x float> %x, %src2
  store <3 x float> %val, ptr %dst.addr
  ret void
}

; Example of a case where widening a v3float operation causes the DAG to replace
; a node with the operation that is currently being widened, i.e. when replacing
; opA with opB, the DAG will produce new operations using opA.
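; The shuffle chain below folds down to a splat of element 0 of %vecinit15,
; which is what the shufps/movaps sequence in the checks expects.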
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, ptr %dst) nounwind {
; X86-LABEL: shuf3:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-NEXT: movaps %xmm1, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf3:
; X64: # %bb.0: # %entry
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: movaps %xmm1, (%rdi)
; X64-NEXT: retq
entry:
  %shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
  %tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp3.i13 = shufflevector <4 x float> %tmp1.i.i, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2> ; <<3 x float>>
  %tmp6.i14 = shufflevector <3 x float> %tmp3.i13, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp97.i = shufflevector <4 x float> %tmp6.i14, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
  %tmp2.i18 = shufflevector <3 x float> %tmp97.i, <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  %t5 = bitcast <4 x float> %tmp2.i18 to <4 x i32>
  %shr.i.i19 = lshr <4 x i32> %t5, <i32 19, i32 19, i32 19, i32 19>
  %and.i.i20 = and <4 x i32> %shr.i.i19, <i32 4080, i32 4080, i32 4080, i32 4080>
  %shuffle.i.i.i21 = shufflevector <4 x float> %tmp2.i18, <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
  store <4 x float> %shuffle.i.i.i21, ptr %dst
  ret void
}

; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
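; Each <4 x i8> argument is passed in the low 32 bits of an XMM register, so the
; widened concatenation becomes a single unpcklps of the two low dwords.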
define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; X86-LABEL: shuf4:
; X86: # %bb.0:
; X86-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
; X64-LABEL: shuf4:
; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: retq
  %vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i8> %vshuf
}

; PR11389: another CONCAT_VECTORS case
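; The shuffle splats i8 33 (0x21), which is where the [33,...] constant on X86
; and the 0x2121212121212121 immediate on X64 come from.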
define void @shuf5(ptr %p) nounwind {
; X86-LABEL: shuf5:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movsd {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33,0,0,0,0,0,0,0,0]
; X86-NEXT: movsd %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf5:
; X64: # %bb.0:
; X64-NEXT: movabsq $2387225703656530209, %rax # imm = 0x2121212121212121
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: retq
  %v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> poison, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  store <8 x i8> %v, ptr %p, align 8
  ret void
}