; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
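
; The tests below spell out unsigned saturating add/sub as plain IR
; (add/sub + icmp + select), which the backend must pattern-match to
; PADDUS/PSUBUS. Worked i8 example: 200 + 100 wraps to 44; since 44 is
; smaller than the original operand 200, the icmp ugt trips and the select
; picks -1 (all ones, i.e. 255), which is exactly the saturated result.
;
; A reader-facing sketch, not part of the checked test: current LLVM also
; exposes these operations directly as intrinsics, e.g.
;   declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
;   %r = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
; and likewise @llvm.usub.sat.* for the subtract forms.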

define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_b:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_b:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <16 x i8> %a0, %a1
  %2 = icmp ugt <16 x i8> %a0, %1
  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
  ret <16 x i8> %3
}

define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_w:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_w:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <8 x i16> %a0, %a1
  %2 = icmp ugt <8 x i16> %a0, %1
  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
  ret <8 x i16> %3
}
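
; The psubus tests below use the identity usubsat(a, b) == max(a, b) - b:
; e.g. for i8, 5 - 9 gives max(5, 9) - 9 = 0 instead of wrapping to 252.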

define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_b:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_b:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <16 x i8> %a0, %a1
  %sel = select <16 x i1> %cmp, <16 x i8> %a0, <16 x i8> %a1
  %sub = sub <16 x i8> %sel, %a1
  ret <16 x i8> %sub
}

define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_w:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_w:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <8 x i16> %a0, %a1
  %sel = select <8 x i1> %cmp, <8 x i16> %a0, <8 x i16> %a1
  %sub = sub <8 x i16> %sel, %a1
  ret <8 x i16> %sub
}
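
; The *_64 variants below use illegal <8 x i8>/<4 x i16> types, so they also
; exercise type legalization: the inputs are packed down to the narrow
; element width, saturated there, then extended back out.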

define <8 x i8> @test_x86_sse2_paddus_b_64(<8 x i8> %a0, <8 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT:    ## encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
; SSE-NEXT:    ## fixup A - offset: 4, value: LCPI4_0, kind: FK_Data_4
; SSE-NEXT:    pand %xmm2, %xmm1 ## encoding: [0x66,0x0f,0xdb,0xca]
; SSE-NEXT:    packuswb %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x67,0xc9]
; SSE-NEXT:    pand %xmm2, %xmm0 ## encoding: [0x66,0x0f,0xdb,0xc2]
; SSE-NEXT:    packuswb %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x67,0xc0]
; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    punpcklbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x60,0xc0]
; SSE-NEXT:    ## xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_b_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A]
; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI4_0, kind: FK_Data_4
; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x71,0x00,0xca]
; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x00,0xc2]
; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX2-NEXT:    vpmovzxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x30,0xc0]
; AVX2-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_b_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vmovdqa LCPI4_0, %xmm2 ## EVEX TO VEX Compression xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SKX-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A]
; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI4_0, kind: FK_Data_4
; SKX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x00,0xca]
; SKX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xc2]
; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; SKX-NEXT:    vpmovzxbw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x30,0xc0]
; SKX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <8 x i8> %a0, %a1
  %2 = icmp ugt <8 x i8> %a0, %1
  %3 = select <8 x i1> %2, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> %1
  ret <8 x i8> %3
}

define <4 x i16> @test_x86_sse2_paddus_w_64(<4 x i16> %a0, <4 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    pshuflw $232, %xmm1, %xmm1 ## encoding: [0xf2,0x0f,0x70,0xc9,0xe8]
; SSE-NEXT:    ## xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT:    pshufhw $232, %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x70,0xc9,0xe8]
; SSE-NEXT:    ## xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE-NEXT:    pshufd $232, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0xe8]
; SSE-NEXT:    ## xmm1 = xmm1[0,2,2,3]
; SSE-NEXT:    pshuflw $232, %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x70,0xc0,0xe8]
; SSE-NEXT:    ## xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT:    pshufhw $232, %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x70,0xc0,0xe8]
; SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT:    pshufd $232, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc0,0xe8]
; SSE-NEXT:    ## xmm0 = xmm0[0,2,2,3]
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    punpcklwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x61,0xc0]
; SSE-NEXT:    ## xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_w_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A]
; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x71,0x00,0xca]
; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x00,0xc2]
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    vpmovzxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x33,0xc0]
; AVX2-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_w_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vmovdqa LCPI5_0, %xmm2 ## EVEX TO VEX Compression xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SKX-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A]
; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; SKX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x00,0xca]
; SKX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xc2]
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    vpmovzxwd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
; SKX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <4 x i16> %a0, %a1
  %2 = icmp ugt <4 x i16> %a0, %1
  %3 = select <4 x i1> %2, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> %1
  ret <4 x i16> %3
}
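
; In the SSE output below, pmaxsw stands in for an unsigned max: both
; operands have just been masked to [0, 255], a range on which signed and
; unsigned word comparisons agree.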

define <8 x i8> @test_x86_sse2_psubus_b_64(<8 x i8> %a0, <8 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT:    ## encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
; SSE-NEXT:    ## fixup A - offset: 4, value: LCPI6_0, kind: FK_Data_4
; SSE-NEXT:    movdqa %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd9]
; SSE-NEXT:    pand %xmm2, %xmm3 ## encoding: [0x66,0x0f,0xdb,0xda]
; SSE-NEXT:    pand %xmm2, %xmm0 ## encoding: [0x66,0x0f,0xdb,0xc2]
; SSE-NEXT:    pmaxsw %xmm3, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc3]
; SSE-NEXT:    psubw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf9,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_b_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT:    ## encoding: [0xc4,0xe2,0x79,0x79,0x15,A,A,A,A]
; AVX2-NEXT:    ## fixup A - offset: 5, value: LCPI6_0, kind: FK_Data_4
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3 ## encoding: [0xc5,0xf1,0xdb,0xda]
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdb,0xc2]
; AVX2-NEXT:    vpmaxuw %xmm3, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc3]
; AVX2-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf9,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_b_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpbroadcastw LCPI6_0, %xmm2 ## EVEX TO VEX Compression xmm2 = [255,255,255,255,255,255,255,255]
; SKX-NEXT:    ## encoding: [0xc4,0xe2,0x79,0x79,0x15,A,A,A,A]
; SKX-NEXT:    ## fixup A - offset: 5, value: LCPI6_0, kind: FK_Data_4
; SKX-NEXT:    vpand %xmm2, %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xdb,0xda]
; SKX-NEXT:    vpand %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc2]
; SKX-NEXT:    vpmaxuw %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc3]
; SKX-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <8 x i8> %a0, %a1
  %sel = select <8 x i1> %cmp, <8 x i8> %a0, <8 x i8> %a1
  %sub = sub <8 x i8> %sel, %a1
  ret <8 x i8> %sub
}
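
; SSE2 has no unsigned dword max (pmaxud is SSE4.1), so the SSE output below
; synthesizes the select with pcmpgtd/pand/pandn/por; the signed compare is
; safe because both operands were masked down to [0, 65535].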

define <4 x i16> @test_x86_sse2_psubus_w_64(<4 x i16> %a0, <4 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT:    ## encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
; SSE-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
; SSE-NEXT:    movdqa %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd9]
; SSE-NEXT:    pand %xmm2, %xmm3 ## encoding: [0x66,0x0f,0xdb,0xda]
; SSE-NEXT:    pand %xmm2, %xmm0 ## encoding: [0x66,0x0f,0xdb,0xc2]
; SSE-NEXT:    movdqa %xmm0, %xmm2 ## encoding: [0x66,0x0f,0x6f,0xd0]
; SSE-NEXT:    pcmpgtd %xmm3, %xmm2 ## encoding: [0x66,0x0f,0x66,0xd3]
; SSE-NEXT:    pand %xmm2, %xmm0 ## encoding: [0x66,0x0f,0xdb,0xc2]
; SSE-NEXT:    pandn %xmm3, %xmm2 ## encoding: [0x66,0x0f,0xdf,0xd3]
; SSE-NEXT:    por %xmm0, %xmm2 ## encoding: [0x66,0x0f,0xeb,0xd0]
; SSE-NEXT:    psubd %xmm1, %xmm2 ## encoding: [0x66,0x0f,0xfa,0xd1]
; SSE-NEXT:    movdqa %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc2]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_w_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
; AVX2-NEXT:    vpblendw $170, %xmm2, %xmm1, %xmm3 ## encoding: [0xc4,0xe3,0x71,0x0e,0xda,0xaa]
; AVX2-NEXT:    ## xmm3 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; AVX2-NEXT:    vpblendw $170, %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc2,0xaa]
; AVX2-NEXT:    ## xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; AVX2-NEXT:    vpmaxud %xmm3, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc3]
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfa,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_w_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; SKX-NEXT:    vpblendw $170, %xmm2, %xmm1, %xmm3 ## encoding: [0xc4,0xe3,0x71,0x0e,0xda,0xaa]
; SKX-NEXT:    ## xmm3 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SKX-NEXT:    vpblendw $170, %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc2,0xaa]
; SKX-NEXT:    ## xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SKX-NEXT:    vpmaxud %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc3]
; SKX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <4 x i16> %a0, %a1
  %sel = select <4 x i1> %cmp, <4 x i16> %a0, <4 x i16> %a1
  %sub = sub <4 x i16> %sel, %a1
  ret <4 x i16> %sub
}

; This test has a normal add and a saturating add.
; FIXME: This should be an addw and an addusw, but a bad canonicalization prevents it.
define <8 x i16> @add_addusw(<8 x i16> %x, <8 x i16> %y, <8 x i16> %z) {
; SSE-LABEL: add_addusw:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddw %xmm2, %xmm1 ## encoding: [0x66,0x0f,0xfd,0xca]
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: add_addusw:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xfd,0xca]
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: add_addusw:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %a = add <8 x i16> %y, %z
  %b = add <8 x i16> %x, %a
  %c = icmp ugt <8 x i16> %a, %b
  %res = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %b
  ret <8 x i16> %res
}