; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
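
; Truncate two <4 x i32> loads to <4 x i16>, add the narrow vectors, and store the result through %c.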
define <4 x i16> @func_16_32(ptr %a, ptr %b, ptr %c) nounwind {
; X86-LABEL: func_16_32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    vmovdqa (%edx), %xmm0
; X86-NEXT:    vpaddw (%ecx), %xmm0, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X86-NEXT:    vmovq %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: func_16_32:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %xmm0
; X64-NEXT:    vpaddw (%rdi), %xmm0, %xmm0
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-NEXT:    vmovq %xmm0, (%rdx)
; X64-NEXT:    retq
  %F = load <4 x i32>, ptr %a
  %G = trunc <4 x i32> %F to <4 x i16>
  %H = load <4 x i32>, ptr %b
  %Y = trunc <4 x i32> %H to <4 x i16>
  %T = add <4 x i16> %Y, %G
  store <4 x i16> %T, ptr %c
  ret <4 x i16> %T
}
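
; Truncate two <4 x i64> loads to <4 x i16>, xor the narrow vectors, and store the result through %c.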
define <4 x i16> @func_16_64(ptr %a, ptr %b, ptr %c) nounwind {
; X86-LABEL: func_16_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    vmovaps (%edx), %ymm0
; X86-NEXT:    vxorps (%ecx), %ymm0, %ymm0
; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; X86-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
; X86-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X86-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
; X86-NEXT:    vmovq %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: func_16_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %ymm0
; X64-NEXT:    vpxor (%rdi), %ymm0, %ymm0
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X64-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
; X64-NEXT:    vmovq %xmm0, (%rdx)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i16>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i16>
  %T = xor <4 x i16> %Y, %G
  store <4 x i16> %T, ptr %c
  ret <4 x i16> %T
}
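
; Truncate two <4 x i64> loads to <4 x i32> and or them; the narrow result is returned rather than stored.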
define <4 x i32> @func_32_64(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_32_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vorps (%eax), %ymm0, %ymm0
; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: func_32_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovaps (%rsi), %ymm0
; X64-NEXT:    vorps (%rdi), %ymm0, %ymm0
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i32>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i32>
  %T = or <4 x i32> %Y, %G
  ret <4 x i32> %T
}
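
; Truncate two <4 x i16> loads to <4 x i8> and add them.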
define <4 x i8> @func_8_16(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    retl
;
; X64-LABEL: func_8_16:
; X64:       # %bb.0:
; X64-NEXT:    movq (%rdi), %rax
; X64-NEXT:    vmovd %eax, %xmm0
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    shrl $16, %ecx
; X64-NEXT:    vpinsrb $1, %ecx, %xmm0, %xmm0
; X64-NEXT:    movq %rax, %rcx
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    vpinsrb $2, %ecx, %xmm0, %xmm0
; X64-NEXT:    shrq $48, %rax
; X64-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
; X64-NEXT:    movq (%rsi), %rax
; X64-NEXT:    vmovd %eax, %xmm1
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    shrl $16, %ecx
; X64-NEXT:    vpinsrb $1, %ecx, %xmm1, %xmm1
; X64-NEXT:    movq %rax, %rcx
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    vpinsrb $2, %ecx, %xmm1, %xmm1
; X64-NEXT:    shrq $48, %rax
; X64-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %F = load <4 x i16>, ptr %a
  %G = trunc <4 x i16> %F to <4 x i8>
  %H = load <4 x i16>, ptr %b
  %Y = trunc <4 x i16> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}
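
; Truncate two <4 x i32> loads to <4 x i8> and subtract them.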
define <4 x i8> @func_8_32(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovdqa (%ecx), %xmm0
; X86-NEXT:    vpsubb (%eax), %xmm0, %xmm0
; X86-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    retl
;
; X64-LABEL: func_8_32:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rsi), %xmm0
; X64-NEXT:    vpsubb (%rdi), %xmm0, %xmm0
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    retq
  %F = load <4 x i32>, ptr %a
  %G = trunc <4 x i32> %F to <4 x i8>
  %H = load <4 x i32>, ptr %b
  %Y = trunc <4 x i32> %H to <4 x i8>
  %T = sub <4 x i8> %Y, %G
  ret <4 x i8> %T
}
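
; Truncate two <4 x i64> loads to <4 x i8> and add them.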
define <4 x i8> @func_8_64(ptr %a, ptr %b) nounwind {
; X86-LABEL: func_8_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovdqa (%ecx), %xmm0
; X86-NEXT:    vmovdqa 16(%ecx), %xmm1
; X86-NEXT:    vbroadcastss {{.*#+}} xmm2 = [0,8,0,0,0,8,0,0,0,8,0,0,0,8,0,0]
; X86-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X86-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; X86-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT:    vmovdqa (%eax), %xmm1
; X86-NEXT:    vmovdqa 16(%eax), %xmm3
; X86-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
; X86-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X86-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: func_8_64:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa (%rdi), %xmm0
; X64-NEXT:    vmovdqa 16(%rdi), %xmm1
; X64-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [0,8,0,8,0,8,0,8,0,8,0,8,0,8,0,8]
; X64-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; X64-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT:    vmovdqa (%rsi), %xmm1
; X64-NEXT:    vmovdqa 16(%rsi), %xmm3
; X64-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
; X64-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %F = load <4 x i64>, ptr %a
  %G = trunc <4 x i64> %F to <4 x i8>
  %H = load <4 x i64>, ptr %b
  %Y = trunc <4 x i64> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}
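
; Truncating a constant <4 x i32> vector to <4 x i16> should fold to a constant-pool load, with no trunc code.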
define <4 x i16> @const_16_32() nounwind {
; CHECK-LABEL: const_16_32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [0,3,8,7,0,3,8,7]
; CHECK-NEXT:    # xmm0 = mem[0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
  ret <4 x i16> %G
}
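
; Same as above, but truncating a constant <4 x i64> vector to <4 x i16>.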
define <4 x i16> @const_16_64() nounwind {
; CHECK-LABEL: const_16_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [0,3,8,7,0,3,8,7]
; CHECK-NEXT:    # xmm0 = mem[0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
  ret <4 x i16> %G
}
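
; Regression test (per the function name) for a trunc bitwidth-reduction issue: the dead vector math should fold away, leaving only a return.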
define void @bugOnTruncBitwidthReduce() nounwind {
; CHECK-LABEL: bugOnTruncBitwidthReduce:
; CHECK:       # %bb.0: # %meh
; CHECK-NEXT:    ret{{[l|q]}}
meh:
  %0 = xor <4 x i64> zeroinitializer, zeroinitializer
  %1 = trunc <4 x i64> %0 to <4 x i32>
  %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
  %3 = xor <4 x i32> %2, %1