; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
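
; Truncate <4 x iN> vector loads to a narrower element type, combine the
; truncated values with a simple binary op, and check the AVX (X86) and
; AVX2 (X64) lowering.

; Truncate two <4 x i32> loads to <4 x i16>, add them, and store the sum through %c.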
define <4 x i16> @func_16_32(ptr %a, ptr %b, ptr %c) nounwind {
; X86-LABEL: func_16_32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    vmovdqa (%edx), %xmm0
; X86-NEXT:    vpaddw (%ecx), %xmm0, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X86-NEXT:    vmovq %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: func_16_32:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %xmm0
; X64-NEXT:    vpaddw (%rdi), %xmm0, %xmm0
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-NEXT:    vmovq %xmm0, (%rdx)
; X64-NEXT:    retq
  %F = load <4 x i32>, ptr %a
  %G = trunc <4 x i32> %F to <4 x i16>
  %H = load <4 x i32>, ptr %b
  %Y = trunc <4 x i32> %H to <4 x i16>
  %T = add <4 x i16> %Y, %G
  store <4 x i16> %T, ptr %c
  ret <4 x i16> %T
}

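; Truncate two <4 x i64> loads to <4 x i16>, xor them, and store the result through %c.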
define <4 x i16> @func_16_64(ptr %a, ptr %b, ptr %c) nounwind {
; X86-LABEL: func_16_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    vmovaps (%edx), %ymm0
; X86-NEXT:    vxorps (%ecx), %ymm0, %ymm0
; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; X86-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
; X86-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X86-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
; X86-NEXT:    vmovq %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: func_16_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %ymm0
; X64-NEXT:    vpxor (%rdi), %ymm0, %ymm0
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X64-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
; X64-NEXT:    vmovq %xmm0, (%rdx)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i16>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i16>
  %T = xor <4 x i16> %Y, %G
  store <4 x i16> %T, ptr %c
  ret <4 x i16> %T
}

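; Truncate two <4 x i64> loads to <4 x i32> and or them.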
define <4 x i32> @func_32_64(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_32_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vorps (%eax), %ymm0, %ymm0
; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: func_32_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovaps (%rsi), %ymm0
; X64-NEXT:    vorps (%rdi), %ymm0, %ymm0
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i32>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i32>
  %T = or <4 x i32> %Y, %G
  ret <4 x i32> %T
}

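; Truncate two <4 x i16> loads to <4 x i8> and add them.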
define <4 x i8> @func_8_16(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    retl
;
; X64-LABEL: func_8_16:
; X64:       # %bb.0:
; X64-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    retq
  %F = load <4 x i16>, ptr %a
  %G = trunc <4 x i16> %F to <4 x i8>
  %H = load <4 x i16>, ptr %b
  %Y = trunc <4 x i16> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}

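; Truncate two <4 x i32> loads to <4 x i8> and subtract them.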
define <4 x i8> @func_8_32(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovdqa (%ecx), %xmm0
; X86-NEXT:    vpsubb (%eax), %xmm0, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    retl
;
; X64-LABEL: func_8_32:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %xmm0
; X64-NEXT:    vpsubb (%rdi), %xmm0, %xmm0
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    retq
  %F = load <4 x i32>, ptr %a
  %G = trunc <4 x i32> %F to <4 x i8>
  %H = load <4 x i32>, ptr %b
  %Y = trunc <4 x i32> %H to <4 x i8>
  %T = sub <4 x i8> %Y, %G
  ret <4 x i8> %T
}

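; Truncate two <4 x i64> loads to <4 x i8> and add them.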
define <4 x i8> @func_8_64(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovdqa (%ecx), %xmm0
; X86-NEXT:    vmovdqa 16(%ecx), %xmm1
; X86-NEXT:    vmovd {{.*#+}} xmm2 = [0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X86-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; X86-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT:    vmovdqa (%eax), %xmm1
; X86-NEXT:    vmovdqa 16(%eax), %xmm3
; X86-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
; X86-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X86-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: func_8_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rdi), %xmm0
; X64-NEXT:    vmovdqa 16(%rdi), %xmm1
; X64-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [0,8,0,8,0,8,0,8,0,8,0,8,0,8,0,8]
; X64-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; X64-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT:    vmovdqa (%rsi), %xmm1
; X64-NEXT:    vmovdqa 16(%rsi), %xmm3
; X64-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
; X64-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i8>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}

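; Truncating a constant <4 x i32> vector folds to a constant load.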
define <4 x i16> @const_16_32() nounwind {
; CHECK-LABEL: const_16_32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = [0,3,8,7,0,0,0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
  ret <4 x i16> %G
}

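; Truncating a constant <4 x i64> vector folds the same way.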
define <4 x i16> @const_16_64() nounwind {
; CHECK-LABEL: const_16_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = [0,3,8,7,0,0,0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
  ret <4 x i16> %G
}

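; Regression test for truncation bitwidth reduction; codegen should reduce to a bare ret.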
define void @bugOnTruncBitwidthReduce() nounwind {
; CHECK-LABEL: bugOnTruncBitwidthReduce:
; CHECK:       # %bb.0: # %meh
; CHECK-NEXT:    ret{{[l|q]}}
meh:
  %0 = xor <4 x i64> zeroinitializer, zeroinitializer
  %1 = trunc <4 x i64> %0 to <4 x i32>
  %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
  %3 = xor <4 x i32> %2, %1