1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
3 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
4 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
5 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
6 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
7 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
8 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X32,SSE,X32-SSE
9 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X32,AVX,X32-AVX,AVX1,X32-AVX1
10 ; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X32,AVX,X32-AVX,AVX512,X32-AVX512
12 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
14 define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
15 ; SSE-LABEL: test_mm_add_epi8:
17 ; SSE-NEXT: paddb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfc,0xc1]
18 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
20 ; AVX1-LABEL: test_mm_add_epi8:
22 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
23 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
25 ; AVX512-LABEL: test_mm_add_epi8:
27 ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
28 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
29 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
30 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
31 %res = add <16 x i8> %arg0, %arg1
32 %bc = bitcast <16 x i8> %res to <2 x i64>
36 define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
37 ; SSE-LABEL: test_mm_add_epi16:
39 ; SSE-NEXT: paddw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfd,0xc1]
40 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
42 ; AVX1-LABEL: test_mm_add_epi16:
44 ; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
45 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
47 ; AVX512-LABEL: test_mm_add_epi16:
49 ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
50 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
51 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
52 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
53 %res = add <8 x i16> %arg0, %arg1
54 %bc = bitcast <8 x i16> %res to <2 x i64>
58 define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
59 ; SSE-LABEL: test_mm_add_epi32:
61 ; SSE-NEXT: paddd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfe,0xc1]
62 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
64 ; AVX1-LABEL: test_mm_add_epi32:
66 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
67 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
69 ; AVX512-LABEL: test_mm_add_epi32:
71 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
72 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
73 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
74 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
75 %res = add <4 x i32> %arg0, %arg1
76 %bc = bitcast <4 x i32> %res to <2 x i64>
80 define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
81 ; SSE-LABEL: test_mm_add_epi64:
83 ; SSE-NEXT: paddq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd4,0xc1]
84 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
86 ; AVX1-LABEL: test_mm_add_epi64:
88 ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd4,0xc1]
89 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
91 ; AVX512-LABEL: test_mm_add_epi64:
93 ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
94 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
95 %res = add <2 x i64> %a0, %a1
99 define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
100 ; SSE-LABEL: test_mm_add_pd:
102 ; SSE-NEXT: addpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x58,0xc1]
103 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
105 ; AVX1-LABEL: test_mm_add_pd:
107 ; AVX1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
108 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
110 ; AVX512-LABEL: test_mm_add_pd:
112 ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
113 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
114 %res = fadd <2 x double> %a0, %a1
115 ret <2 x double> %res
118 define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
119 ; SSE-LABEL: test_mm_add_sd:
121 ; SSE-NEXT: addsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x58,0xc1]
122 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
124 ; AVX1-LABEL: test_mm_add_sd:
126 ; AVX1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x58,0xc1]
127 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
129 ; AVX512-LABEL: test_mm_add_sd:
131 ; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x58,0xc1]
132 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
133 %ext0 = extractelement <2 x double> %a0, i32 0
134 %ext1 = extractelement <2 x double> %a1, i32 0
135 %fadd = fadd double %ext0, %ext1
136 %res = insertelement <2 x double> %a0, double %fadd, i32 0
137 ret <2 x double> %res
140 define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
141 ; SSE-LABEL: test_mm_adds_epi8:
143 ; SSE-NEXT: paddsb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xec,0xc1]
144 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
146 ; AVX1-LABEL: test_mm_adds_epi8:
148 ; AVX1-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xec,0xc1]
149 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
151 ; AVX512-LABEL: test_mm_adds_epi8:
153 ; AVX512-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
154 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
155 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
156 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
157 %res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
158 %bc = bitcast <16 x i8> %res to <2 x i64>
161 declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone ; signed saturating add; lowered to (v)paddsb in test_mm_adds_epi8 above
163 define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
164 ; SSE-LABEL: test_mm_adds_epi16:
166 ; SSE-NEXT: paddsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xed,0xc1]
167 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
169 ; AVX1-LABEL: test_mm_adds_epi16:
171 ; AVX1-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xed,0xc1]
172 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
174 ; AVX512-LABEL: test_mm_adds_epi16:
176 ; AVX512-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
177 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
178 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
179 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
180 %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
181 %bc = bitcast <8 x i16> %res to <2 x i64>
184 declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone ; signed saturating add; lowered to (v)paddsw in test_mm_adds_epi16 above
186 define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
187 ; SSE-LABEL: test_mm_adds_epu8:
189 ; SSE-NEXT: paddusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdc,0xc1]
190 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
192 ; AVX1-LABEL: test_mm_adds_epu8:
194 ; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdc,0xc1]
195 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
197 ; AVX512-LABEL: test_mm_adds_epu8:
199 ; AVX512-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
200 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
201 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
202 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
203 %res = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
204 %bc = bitcast <16 x i8> %res to <2 x i64>
207 declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone ; unsigned saturating add; lowered to (v)paddusb in test_mm_adds_epu8 above
209 define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
210 ; SSE-LABEL: test_mm_adds_epu16:
212 ; SSE-NEXT: paddusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdd,0xc1]
213 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
215 ; AVX1-LABEL: test_mm_adds_epu16:
217 ; AVX1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdd,0xc1]
218 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
220 ; AVX512-LABEL: test_mm_adds_epu16:
222 ; AVX512-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
223 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
224 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
225 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
226 %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
227 %bc = bitcast <8 x i16> %res to <2 x i64>
230 declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone ; unsigned saturating add; lowered to (v)paddusw in test_mm_adds_epu16 above
232 define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
233 ; SSE-LABEL: test_mm_and_pd:
235 ; SSE-NEXT: andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
236 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
238 ; AVX1-LABEL: test_mm_and_pd:
240 ; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
241 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
243 ; AVX512-LABEL: test_mm_and_pd:
245 ; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
246 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
247 %arg0 = bitcast <2 x double> %a0 to <4 x i32>
248 %arg1 = bitcast <2 x double> %a1 to <4 x i32>
249 %res = and <4 x i32> %arg0, %arg1
250 %bc = bitcast <4 x i32> %res to <2 x double>
254 define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
255 ; SSE-LABEL: test_mm_and_si128:
257 ; SSE-NEXT: andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
258 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
260 ; AVX1-LABEL: test_mm_and_si128:
262 ; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
263 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
265 ; AVX512-LABEL: test_mm_and_si128:
267 ; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
268 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
269 %res = and <2 x i64> %a0, %a1
273 define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
274 ; SSE-LABEL: test_mm_andnot_pd:
276 ; SSE-NEXT: pcmpeqd %xmm2, %xmm2 # encoding: [0x66,0x0f,0x76,0xd2]
277 ; SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
278 ; SSE-NEXT: pand %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc1]
279 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
281 ; AVX1-LABEL: test_mm_andnot_pd:
283 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
284 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xef,0xc2]
285 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdb,0xc1]
286 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
288 ; AVX512-LABEL: test_mm_andnot_pd:
290 ; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf3,0xfd,0x08,0x25,0xc0,0x0f]
291 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc1]
292 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
293 %arg0 = bitcast <2 x double> %a0 to <4 x i32>
294 %arg1 = bitcast <2 x double> %a1 to <4 x i32>
295 %not = xor <4 x i32> %arg0, <i32 -1, i32 -1, i32 -1, i32 -1>
296 %res = and <4 x i32> %not, %arg1
297 %bc = bitcast <4 x i32> %res to <2 x double>
301 define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
302 ; SSE-LABEL: test_mm_andnot_si128:
304 ; SSE-NEXT: pcmpeqd %xmm2, %xmm2 # encoding: [0x66,0x0f,0x76,0xd2]
305 ; SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
306 ; SSE-NEXT: pand %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc1]
307 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
309 ; AVX1-LABEL: test_mm_andnot_si128:
311 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
312 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xef,0xc2]
313 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdb,0xc1]
314 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
316 ; AVX512-LABEL: test_mm_andnot_si128:
318 ; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf3,0xfd,0x08,0x25,0xc0,0x0f]
319 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc1]
320 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
321 %not = xor <2 x i64> %a0, <i64 -1, i64 -1>
322 %res = and <2 x i64> %not, %a1
326 define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
327 ; SSE-LABEL: test_mm_avg_epu8:
329 ; SSE-NEXT: pavgb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe0,0xc1]
330 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
332 ; AVX1-LABEL: test_mm_avg_epu8:
334 ; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe0,0xc1]
335 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
337 ; AVX512-LABEL: test_mm_avg_epu8:
339 ; AVX512-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xc1]
340 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
341 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
342 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
343 %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %arg0, <16 x i8> %arg1)
344 %bc = bitcast <16 x i8> %res to <2 x i64>
347 declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone ; unsigned byte average (pavgb); names dropped from the declaration to match the file's other declares
349 define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
350 ; SSE-LABEL: test_mm_avg_epu16:
352 ; SSE-NEXT: pavgw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe3,0xc1]
353 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
355 ; AVX1-LABEL: test_mm_avg_epu16:
357 ; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe3,0xc1]
358 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
360 ; AVX512-LABEL: test_mm_avg_epu16:
362 ; AVX512-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xc1]
363 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
364 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
365 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
366 %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %arg0, <8 x i16> %arg1)
367 %bc = bitcast <8 x i16> %res to <2 x i64>
370 declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone ; unsigned word average; lowered to (v)pavgw in test_mm_avg_epu16 above
372 define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
373 ; SSE-LABEL: test_mm_bslli_si128:
375 ; SSE-NEXT: pslldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xf8,0x05]
376 ; SSE-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
377 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
379 ; AVX1-LABEL: test_mm_bslli_si128:
381 ; AVX1-NEXT: vpslldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf8,0x05]
382 ; AVX1-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
383 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
385 ; AVX512-LABEL: test_mm_bslli_si128:
387 ; AVX512-NEXT: vpslldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x05]
388 ; AVX512-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
389 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
390 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
391 %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
392 %bc = bitcast <16 x i8> %res to <2 x i64>
396 define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
397 ; SSE-LABEL: test_mm_bsrli_si128:
399 ; SSE-NEXT: psrldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xd8,0x05]
400 ; SSE-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
401 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
403 ; AVX1-LABEL: test_mm_bsrli_si128:
405 ; AVX1-NEXT: vpsrldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd8,0x05]
406 ; AVX1-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
407 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
409 ; AVX512-LABEL: test_mm_bsrli_si128:
411 ; AVX512-NEXT: vpsrldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x05]
412 ; AVX512-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
413 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
414 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
415 %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
416 %bc = bitcast <16 x i8> %res to <2 x i64>
420 define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
421 ; CHECK-LABEL: test_mm_castpd_ps:
423 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
424 %res = bitcast <2 x double> %a0 to <4 x float>
428 define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
429 ; CHECK-LABEL: test_mm_castpd_si128:
431 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
432 %res = bitcast <2 x double> %a0 to <2 x i64>
436 define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
437 ; CHECK-LABEL: test_mm_castps_pd:
439 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
440 %res = bitcast <4 x float> %a0 to <2 x double>
441 ret <2 x double> %res
444 define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
445 ; CHECK-LABEL: test_mm_castps_si128:
447 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
448 %res = bitcast <4 x float> %a0 to <2 x i64>
452 define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
453 ; CHECK-LABEL: test_mm_castsi128_pd:
455 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
456 %res = bitcast <2 x i64> %a0 to <2 x double>
457 ret <2 x double> %res
460 define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
461 ; CHECK-LABEL: test_mm_castsi128_ps:
463 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
464 %res = bitcast <2 x i64> %a0 to <4 x float>
468 define void @test_mm_clflush(i8* %a0) nounwind {
469 ; X86-LABEL: test_mm_clflush:
471 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
472 ; X86-NEXT: clflush (%eax) # encoding: [0x0f,0xae,0x38]
473 ; X86-NEXT: retl # encoding: [0xc3]
475 ; X64-LABEL: test_mm_clflush:
477 ; X64-NEXT: clflush (%rdi) # encoding: [0x0f,0xae,0x3f]
478 ; X64-NEXT: retq # encoding: [0xc3]
480 ; X32-LABEL: test_mm_clflush:
482 ; X32-NEXT: clflush (%edi) # encoding: [0x67,0x0f,0xae,0x3f]
483 ; X32-NEXT: retq # encoding: [0xc3]
484 call void @llvm.x86.sse2.clflush(i8* %a0)
487 declare void @llvm.x86.sse2.clflush(i8*) nounwind readnone ; cache-line flush; NOTE(review): readnone looks odd for a flush, but intrinsic attributes are fixed by the intrinsic definition, not this declare
489 define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
490 ; SSE-LABEL: test_mm_cmpeq_epi8:
492 ; SSE-NEXT: pcmpeqb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x74,0xc1]
493 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
495 ; AVX1-LABEL: test_mm_cmpeq_epi8:
497 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x74,0xc1]
498 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
500 ; AVX512-LABEL: test_mm_cmpeq_epi8:
502 ; AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
503 ; AVX512-NEXT: vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
504 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
505 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
506 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
507 %cmp = icmp eq <16 x i8> %arg0, %arg1
508 %res = sext <16 x i1> %cmp to <16 x i8>
509 %bc = bitcast <16 x i8> %res to <2 x i64>
513 define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
514 ; SSE-LABEL: test_mm_cmpeq_epi16:
516 ; SSE-NEXT: pcmpeqw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x75,0xc1]
517 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
519 ; AVX1-LABEL: test_mm_cmpeq_epi16:
521 ; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x75,0xc1]
522 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
524 ; AVX512-LABEL: test_mm_cmpeq_epi16:
526 ; AVX512-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
527 ; AVX512-NEXT: vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
528 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
529 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
530 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
531 %cmp = icmp eq <8 x i16> %arg0, %arg1
532 %res = sext <8 x i1> %cmp to <8 x i16>
533 %bc = bitcast <8 x i16> %res to <2 x i64>
537 define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
538 ; SSE-LABEL: test_mm_cmpeq_epi32:
540 ; SSE-NEXT: pcmpeqd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x76,0xc1]
541 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
543 ; AVX1-LABEL: test_mm_cmpeq_epi32:
545 ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x76,0xc1]
546 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
548 ; AVX512-LABEL: test_mm_cmpeq_epi32:
550 ; AVX512-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
551 ; AVX512-NEXT: vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
552 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
553 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
554 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
555 %cmp = icmp eq <4 x i32> %arg0, %arg1
556 %res = sext <4 x i1> %cmp to <4 x i32>
557 %bc = bitcast <4 x i32> %res to <2 x i64>
561 define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
562 ; SSE-LABEL: test_mm_cmpeq_pd:
564 ; SSE-NEXT: cmpeqpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x00]
565 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
567 ; AVX1-LABEL: test_mm_cmpeq_pd:
569 ; AVX1-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x00]
570 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
572 ; AVX512-LABEL: test_mm_cmpeq_pd:
574 ; AVX512-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x00]
575 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
576 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
577 %fcmp = fcmp oeq <2 x double> %a0, %a1
578 %sext = sext <2 x i1> %fcmp to <2 x i64>
579 %res = bitcast <2 x i64> %sext to <2 x double>
580 ret <2 x double> %res
583 define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
584 ; SSE-LABEL: test_mm_cmpeq_sd:
586 ; SSE-NEXT: cmpeqsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x00]
587 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
589 ; AVX-LABEL: test_mm_cmpeq_sd:
591 ; AVX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x00]
592 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
593 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
594 ret <2 x double> %res
596 declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone ; scalar double compare; i8 is the cmpsd predicate imm (callers here use 0=eq, 1=lt, 2=le)
598 define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
599 ; SSE-LABEL: test_mm_cmpge_pd:
601 ; SSE-NEXT: cmplepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x02]
602 ; SSE-NEXT: movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
603 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
605 ; AVX1-LABEL: test_mm_cmpge_pd:
607 ; AVX1-NEXT: vcmplepd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x02]
608 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
610 ; AVX512-LABEL: test_mm_cmpge_pd:
612 ; AVX512-NEXT: vcmplepd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x02]
613 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
614 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
615 %fcmp = fcmp ole <2 x double> %a1, %a0
616 %sext = sext <2 x i1> %fcmp to <2 x i64>
617 %res = bitcast <2 x i64> %sext to <2 x double>
618 ret <2 x double> %res
621 define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
622 ; SSE-LABEL: test_mm_cmpge_sd:
624 ; SSE-NEXT: cmplesd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x02]
625 ; SSE-NEXT: movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
626 ; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
627 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
629 ; AVX-LABEL: test_mm_cmpge_sd:
631 ; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x02]
632 ; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
633 ; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
634 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
635 %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 2)
636 %ext0 = extractelement <2 x double> %cmp, i32 0
637 %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
638 %ext1 = extractelement <2 x double> %a0, i32 1
639 %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
640 ret <2 x double> %ins1
643 define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
644 ; SSE-LABEL: test_mm_cmpgt_epi8:
646 ; SSE-NEXT: pcmpgtb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x64,0xc1]
647 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
649 ; AVX1-LABEL: test_mm_cmpgt_epi8:
651 ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x64,0xc1]
652 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
654 ; AVX512-LABEL: test_mm_cmpgt_epi8:
656 ; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
657 ; AVX512-NEXT: vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
658 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
659 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
660 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
661 %cmp = icmp sgt <16 x i8> %arg0, %arg1
662 %res = sext <16 x i1> %cmp to <16 x i8>
663 %bc = bitcast <16 x i8> %res to <2 x i64>
667 define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
668 ; SSE-LABEL: test_mm_cmpgt_epi16:
670 ; SSE-NEXT: pcmpgtw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x65,0xc1]
671 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
673 ; AVX1-LABEL: test_mm_cmpgt_epi16:
675 ; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x65,0xc1]
676 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
678 ; AVX512-LABEL: test_mm_cmpgt_epi16:
680 ; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
681 ; AVX512-NEXT: vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
682 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
683 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
684 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
685 %cmp = icmp sgt <8 x i16> %arg0, %arg1
686 %res = sext <8 x i1> %cmp to <8 x i16>
687 %bc = bitcast <8 x i16> %res to <2 x i64>
691 define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
692 ; SSE-LABEL: test_mm_cmpgt_epi32:
694 ; SSE-NEXT: pcmpgtd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x66,0xc1]
695 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
697 ; AVX1-LABEL: test_mm_cmpgt_epi32:
699 ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x66,0xc1]
700 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
702 ; AVX512-LABEL: test_mm_cmpgt_epi32:
704 ; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
705 ; AVX512-NEXT: vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
706 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
707 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
708 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
709 %cmp = icmp sgt <4 x i32> %arg0, %arg1
710 %res = sext <4 x i1> %cmp to <4 x i32>
711 %bc = bitcast <4 x i32> %res to <2 x i64>
715 define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
716 ; SSE-LABEL: test_mm_cmpgt_pd:
718 ; SSE-NEXT: cmpltpd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x01]
719 ; SSE-NEXT: movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
720 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
722 ; AVX1-LABEL: test_mm_cmpgt_pd:
724 ; AVX1-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x01]
725 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
727 ; AVX512-LABEL: test_mm_cmpgt_pd:
729 ; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x01]
730 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
731 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
732 %fcmp = fcmp olt <2 x double> %a1, %a0
733 %sext = sext <2 x i1> %fcmp to <2 x i64>
734 %res = bitcast <2 x i64> %sext to <2 x double>
735 ret <2 x double> %res
738 define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
739 ; SSE-LABEL: test_mm_cmpgt_sd:
741 ; SSE-NEXT: cmpltsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x01]
742 ; SSE-NEXT: movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
743 ; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
744 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
746 ; AVX-LABEL: test_mm_cmpgt_sd:
748 ; AVX-NEXT: vcmpltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x01]
749 ; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
750 ; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
751 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
752 %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 1)
753 %ext0 = extractelement <2 x double> %cmp, i32 0
754 %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
755 %ext1 = extractelement <2 x double> %a0, i32 1
756 %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
757 ret <2 x double> %ins1
760 define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
761 ; SSE-LABEL: test_mm_cmple_pd:
763 ; SSE-NEXT: cmplepd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x02]
764 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
766 ; AVX1-LABEL: test_mm_cmple_pd:
768 ; AVX1-NEXT: vcmplepd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x02]
769 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
771 ; AVX512-LABEL: test_mm_cmple_pd:
773 ; AVX512-NEXT: vcmplepd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
774 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
775 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
776 %fcmp = fcmp ole <2 x double> %a0, %a1
777 %sext = sext <2 x i1> %fcmp to <2 x i64>
778 %res = bitcast <2 x i64> %sext to <2 x double>
779 ret <2 x double> %res
782 define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
783 ; SSE-LABEL: test_mm_cmple_sd:
785 ; SSE-NEXT: cmplesd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x02]
786 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
788 ; AVX-LABEL: test_mm_cmple_sd:
790 ; AVX-NEXT: vcmplesd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x02]
791 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
792 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 2)
793 ret <2 x double> %res
796 define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
797 ; SSE-LABEL: test_mm_cmplt_epi8:
799 ; SSE-NEXT: pcmpgtb %xmm0, %xmm1 # encoding: [0x66,0x0f,0x64,0xc8]
800 ; SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
801 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
803 ; AVX1-LABEL: test_mm_cmplt_epi8:
805 ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x64,0xc0]
806 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
808 ; AVX512-LABEL: test_mm_cmplt_epi8:
810 ; AVX512-NEXT: vpcmpgtb %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x64,0xc0]
811 ; AVX512-NEXT: vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
812 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
813 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
814 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
815 %cmp = icmp sgt <16 x i8> %arg1, %arg0
816 %res = sext <16 x i1> %cmp to <16 x i8>
817 %bc = bitcast <16 x i8> %res to <2 x i64>
821 define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
822 ; SSE-LABEL: test_mm_cmplt_epi16:
824 ; SSE-NEXT: pcmpgtw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x65,0xc8]
825 ; SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
826 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
828 ; AVX1-LABEL: test_mm_cmplt_epi16:
830 ; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x65,0xc0]
831 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
833 ; AVX512-LABEL: test_mm_cmplt_epi16:
835 ; AVX512-NEXT: vpcmpgtw %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x65,0xc0]
836 ; AVX512-NEXT: vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
837 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
838 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
839 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
840 %cmp = icmp sgt <8 x i16> %arg1, %arg0
841 %res = sext <8 x i1> %cmp to <8 x i16>
842 %bc = bitcast <8 x i16> %res to <2 x i64>
846 define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
847 ; SSE-LABEL: test_mm_cmplt_epi32:
849 ; SSE-NEXT: pcmpgtd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x66,0xc8]
850 ; SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
851 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
853 ; AVX1-LABEL: test_mm_cmplt_epi32:
855 ; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x66,0xc0]
856 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
858 ; AVX512-LABEL: test_mm_cmplt_epi32:
860 ; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x66,0xc0]
861 ; AVX512-NEXT: vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
862 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
863 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
864 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
865 %cmp = icmp sgt <4 x i32> %arg1, %arg0
866 %res = sext <4 x i1> %cmp to <4 x i32>
867 %bc = bitcast <4 x i32> %res to <2 x i64>
871 define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
872 ; SSE-LABEL: test_mm_cmplt_pd:
874 ; SSE-NEXT: cmpltpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x01]
875 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
877 ; AVX1-LABEL: test_mm_cmplt_pd:
879 ; AVX1-NEXT: vcmpltpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x01]
880 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
882 ; AVX512-LABEL: test_mm_cmplt_pd:
884 ; AVX512-NEXT: vcmpltpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x01]
885 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
886 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
887 %fcmp = fcmp olt <2 x double> %a0, %a1
888 %sext = sext <2 x i1> %fcmp to <2 x i64>
889 %res = bitcast <2 x i64> %sext to <2 x double>
890 ret <2 x double> %res
893 define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
894 ; SSE-LABEL: test_mm_cmplt_sd:
896 ; SSE-NEXT: cmpltsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x01]
897 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
899 ; AVX-LABEL: test_mm_cmplt_sd:
901 ; AVX-NEXT: vcmpltsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x01]
902 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
903 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 1)
904 ret <2 x double> %res
907 define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
908 ; SSE-LABEL: test_mm_cmpneq_pd:
910 ; SSE-NEXT: cmpneqpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x04]
911 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
913 ; AVX1-LABEL: test_mm_cmpneq_pd:
915 ; AVX1-NEXT: vcmpneqpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x04]
916 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
918 ; AVX512-LABEL: test_mm_cmpneq_pd:
920 ; AVX512-NEXT: vcmpneqpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x04]
921 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
922 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
923 %fcmp = fcmp une <2 x double> %a0, %a1
924 %sext = sext <2 x i1> %fcmp to <2 x i64>
925 %res = bitcast <2 x i64> %sext to <2 x double>
926 ret <2 x double> %res
929 define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
930 ; SSE-LABEL: test_mm_cmpneq_sd:
932 ; SSE-NEXT: cmpneqsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x04]
933 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
935 ; AVX-LABEL: test_mm_cmpneq_sd:
937 ; AVX-NEXT: vcmpneqsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x04]
938 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
939 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 4)
940 ret <2 x double> %res
943 define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
944 ; SSE-LABEL: test_mm_cmpnge_pd:
946 ; SSE-NEXT: cmpnlepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x06]
947 ; SSE-NEXT: movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
948 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
950 ; AVX1-LABEL: test_mm_cmpnge_pd:
952 ; AVX1-NEXT: vcmpnlepd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x06]
953 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
955 ; AVX512-LABEL: test_mm_cmpnge_pd:
957 ; AVX512-NEXT: vcmpnlepd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x06]
958 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
959 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
960 %fcmp = fcmp ugt <2 x double> %a1, %a0
961 %sext = sext <2 x i1> %fcmp to <2 x i64>
962 %res = bitcast <2 x i64> %sext to <2 x double>
963 ret <2 x double> %res
966 define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
967 ; SSE-LABEL: test_mm_cmpnge_sd:
969 ; SSE-NEXT: cmpnlesd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x06]
970 ; SSE-NEXT: movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
971 ; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
972 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
974 ; AVX-LABEL: test_mm_cmpnge_sd:
976 ; AVX-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x06]
977 ; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
978 ; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
979 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
980 %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 6)
981 %ext0 = extractelement <2 x double> %cmp, i32 0
982 %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
983 %ext1 = extractelement <2 x double> %a0, i32 1
984 %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
985 ret <2 x double> %ins1
988 define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
989 ; SSE-LABEL: test_mm_cmpngt_pd:
991 ; SSE-NEXT: cmpnltpd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x05]
992 ; SSE-NEXT: movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
993 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
995 ; AVX1-LABEL: test_mm_cmpngt_pd:
997 ; AVX1-NEXT: vcmpnltpd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x05]
998 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1000 ; AVX512-LABEL: test_mm_cmpngt_pd:
1002 ; AVX512-NEXT: vcmpnltpd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x05]
1003 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
1004 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1005 %fcmp = fcmp uge <2 x double> %a1, %a0
1006 %sext = sext <2 x i1> %fcmp to <2 x i64>
1007 %res = bitcast <2 x i64> %sext to <2 x double>
1008 ret <2 x double> %res
1011 define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1012 ; SSE-LABEL: test_mm_cmpngt_sd:
1014 ; SSE-NEXT: cmpnltsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x05]
1015 ; SSE-NEXT: movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
1016 ; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
1017 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1019 ; AVX-LABEL: test_mm_cmpngt_sd:
1021 ; AVX-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x05]
1022 ; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
1023 ; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
1024 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1025 %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 5)
1026 %ext0 = extractelement <2 x double> %cmp, i32 0
1027 %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
1028 %ext1 = extractelement <2 x double> %a0, i32 1
1029 %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
1030 ret <2 x double> %ins1
1033 define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
1034 ; SSE-LABEL: test_mm_cmpnle_pd:
1036 ; SSE-NEXT: cmpnlepd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x06]
1037 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1039 ; AVX1-LABEL: test_mm_cmpnle_pd:
1041 ; AVX1-NEXT: vcmpnlepd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x06]
1042 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1044 ; AVX512-LABEL: test_mm_cmpnle_pd:
1046 ; AVX512-NEXT: vcmpnlepd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x06]
1047 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
1048 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1049 %fcmp = fcmp ugt <2 x double> %a0, %a1
1050 %sext = sext <2 x i1> %fcmp to <2 x i64>
1051 %res = bitcast <2 x i64> %sext to <2 x double>
1052 ret <2 x double> %res
1055 define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1056 ; SSE-LABEL: test_mm_cmpnle_sd:
1058 ; SSE-NEXT: cmpnlesd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x06]
1059 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1061 ; AVX-LABEL: test_mm_cmpnle_sd:
1063 ; AVX-NEXT: vcmpnlesd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x06]
1064 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1065 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 6)
1066 ret <2 x double> %res
1069 define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
1070 ; SSE-LABEL: test_mm_cmpnlt_pd:
1072 ; SSE-NEXT: cmpnltpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x05]
1073 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1075 ; AVX1-LABEL: test_mm_cmpnlt_pd:
1077 ; AVX1-NEXT: vcmpnltpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x05]
1078 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1080 ; AVX512-LABEL: test_mm_cmpnlt_pd:
1082 ; AVX512-NEXT: vcmpnltpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x05]
1083 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
1084 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1085 %fcmp = fcmp uge <2 x double> %a0, %a1
1086 %sext = sext <2 x i1> %fcmp to <2 x i64>
1087 %res = bitcast <2 x i64> %sext to <2 x double>
1088 ret <2 x double> %res
1091 define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1092 ; SSE-LABEL: test_mm_cmpnlt_sd:
1094 ; SSE-NEXT: cmpnltsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x05]
1095 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1097 ; AVX-LABEL: test_mm_cmpnlt_sd:
1099 ; AVX-NEXT: vcmpnltsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x05]
1100 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1101 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 5)
1102 ret <2 x double> %res
1105 define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
1106 ; SSE-LABEL: test_mm_cmpord_pd:
1108 ; SSE-NEXT: cmpordpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x07]
1109 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1111 ; AVX1-LABEL: test_mm_cmpord_pd:
1113 ; AVX1-NEXT: vcmpordpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
1114 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1116 ; AVX512-LABEL: test_mm_cmpord_pd:
1118 ; AVX512-NEXT: vcmpordpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x07]
1119 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
1120 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1121 %fcmp = fcmp ord <2 x double> %a0, %a1
1122 %sext = sext <2 x i1> %fcmp to <2 x i64>
1123 %res = bitcast <2 x i64> %sext to <2 x double>
1124 ret <2 x double> %res
1127 define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1128 ; SSE-LABEL: test_mm_cmpord_sd:
1130 ; SSE-NEXT: cmpordsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x07]
1131 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1133 ; AVX-LABEL: test_mm_cmpord_sd:
1135 ; AVX-NEXT: vcmpordsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
1136 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1137 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7)
1138 ret <2 x double> %res
1141 define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
1142 ; SSE-LABEL: test_mm_cmpunord_pd:
1144 ; SSE-NEXT: cmpunordpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x03]
1145 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1147 ; AVX1-LABEL: test_mm_cmpunord_pd:
1149 ; AVX1-NEXT: vcmpunordpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x03]
1150 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1152 ; AVX512-LABEL: test_mm_cmpunord_pd:
1154 ; AVX512-NEXT: vcmpunordpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x03]
1155 ; AVX512-NEXT: vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
1156 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1157 %fcmp = fcmp uno <2 x double> %a0, %a1
1158 %sext = sext <2 x i1> %fcmp to <2 x i64>
1159 %res = bitcast <2 x i64> %sext to <2 x double>
1160 ret <2 x double> %res
1163 define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1164 ; SSE-LABEL: test_mm_cmpunord_sd:
1166 ; SSE-NEXT: cmpunordsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x03]
1167 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1169 ; AVX-LABEL: test_mm_cmpunord_sd:
1171 ; AVX-NEXT: vcmpunordsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x03]
1172 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1173 %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 3)
1174 ret <2 x double> %res
1177 define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1178 ; SSE-LABEL: test_mm_comieq_sd:
1180 ; SSE-NEXT: comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
1181 ; SSE-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
1182 ; SSE-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
1183 ; SSE-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
1184 ; SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1185 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1187 ; AVX1-LABEL: test_mm_comieq_sd:
1189 ; AVX1-NEXT: vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
1190 ; AVX1-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
1191 ; AVX1-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
1192 ; AVX1-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
1193 ; AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1194 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1196 ; AVX512-LABEL: test_mm_comieq_sd:
1198 ; AVX512-NEXT: vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
1199 ; AVX512-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
1200 ; AVX512-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
1201 ; AVX512-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
1202 ; AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1203 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1204 %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
1207 declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone
1209 define i32 @test_mm_comige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1210 ; SSE-LABEL: test_mm_comige_sd:
1212 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1213 ; SSE-NEXT: comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
1214 ; SSE-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1215 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1217 ; AVX1-LABEL: test_mm_comige_sd:
1219 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1220 ; AVX1-NEXT: vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
1221 ; AVX1-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1222 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1224 ; AVX512-LABEL: test_mm_comige_sd:
1226 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1227 ; AVX512-NEXT: vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
1228 ; AVX512-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1229 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1230 %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1)
1233 declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readnone
1235 define i32 @test_mm_comigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1236 ; SSE-LABEL: test_mm_comigt_sd:
1238 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1239 ; SSE-NEXT: comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
1240 ; SSE-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1241 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1243 ; AVX1-LABEL: test_mm_comigt_sd:
1245 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1246 ; AVX1-NEXT: vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
1247 ; AVX1-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1248 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1250 ; AVX512-LABEL: test_mm_comigt_sd:
1252 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1253 ; AVX512-NEXT: vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
1254 ; AVX512-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1255 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1256 %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1)
1259 declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readnone
1261 define i32 @test_mm_comile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1262 ; SSE-LABEL: test_mm_comile_sd:
1264 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1265 ; SSE-NEXT: comisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2f,0xc8]
1266 ; SSE-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1267 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1269 ; AVX1-LABEL: test_mm_comile_sd:
1271 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1272 ; AVX1-NEXT: vcomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2f,0xc8]
1273 ; AVX1-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1274 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1276 ; AVX512-LABEL: test_mm_comile_sd:
1278 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1279 ; AVX512-NEXT: vcomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
1280 ; AVX512-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
1281 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1282 %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1)
1285 declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readnone
1287 define i32 @test_mm_comilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1288 ; SSE-LABEL: test_mm_comilt_sd:
1290 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1291 ; SSE-NEXT: comisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2f,0xc8]
1292 ; SSE-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1293 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1295 ; AVX1-LABEL: test_mm_comilt_sd:
1297 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1298 ; AVX1-NEXT: vcomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2f,0xc8]
1299 ; AVX1-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1300 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1302 ; AVX512-LABEL: test_mm_comilt_sd:
1304 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
1305 ; AVX512-NEXT: vcomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
1306 ; AVX512-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
1307 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1308 %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1)
1311 declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readnone
1313 define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1314 ; SSE-LABEL: test_mm_comineq_sd:
1316 ; SSE-NEXT: comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
1317 ; SSE-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
1318 ; SSE-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
1319 ; SSE-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
1320 ; SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1321 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1323 ; AVX1-LABEL: test_mm_comineq_sd:
1325 ; AVX1-NEXT: vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
1326 ; AVX1-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
1327 ; AVX1-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
1328 ; AVX1-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
1329 ; AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1330 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1332 ; AVX512-LABEL: test_mm_comineq_sd:
1334 ; AVX512-NEXT: vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
1335 ; AVX512-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
1336 ; AVX512-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
1337 ; AVX512-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
1338 ; AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
1339 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1340 %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1)
1343 declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readnone
1345 define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
1346 ; SSE-LABEL: test_mm_cvtepi32_pd:
1348 ; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 # encoding: [0xf3,0x0f,0xe6,0xc0]
1349 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1351 ; AVX1-LABEL: test_mm_cvtepi32_pd:
1353 ; AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0 # encoding: [0xc5,0xfa,0xe6,0xc0]
1354 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1356 ; AVX512-LABEL: test_mm_cvtepi32_pd:
1358 ; AVX512-NEXT: vcvtdq2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xc0]
1359 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1360 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
1361 %ext = shufflevector <4 x i32> %arg0, <4 x i32> %arg0, <2 x i32> <i32 0, i32 1>
1362 %res = sitofp <2 x i32> %ext to <2 x double>
1363 ret <2 x double> %res
1366 define <4 x float> @test_mm_cvtepi32_ps(<2 x i64> %a0) nounwind {
1367 ; SSE-LABEL: test_mm_cvtepi32_ps:
1369 ; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 # encoding: [0x0f,0x5b,0xc0]
1370 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1372 ; AVX1-LABEL: test_mm_cvtepi32_ps:
1374 ; AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5b,0xc0]
1375 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1377 ; AVX512-LABEL: test_mm_cvtepi32_ps:
1379 ; AVX512-NEXT: vcvtdq2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
1380 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1381 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
1382 %res = sitofp <4 x i32> %arg0 to <4 x float>
1383 ret <4 x float> %res
1386 define <2 x i64> @test_mm_cvtpd_epi32(<2 x double> %a0) nounwind {
1387 ; SSE-LABEL: test_mm_cvtpd_epi32:
1389 ; SSE-NEXT: cvtpd2dq %xmm0, %xmm0 # encoding: [0xf2,0x0f,0xe6,0xc0]
1390 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1392 ; AVX1-LABEL: test_mm_cvtpd_epi32:
1394 ; AVX1-NEXT: vcvtpd2dq %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xe6,0xc0]
1395 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1397 ; AVX512-LABEL: test_mm_cvtpd_epi32:
1399 ; AVX512-NEXT: vcvtpd2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
1400 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1401 %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
1402 %bc = bitcast <4 x i32> %res to <2 x i64>
1405 declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
1407 define <4 x float> @test_mm_cvtpd_ps(<2 x double> %a0) nounwind {
1408 ; SSE-LABEL: test_mm_cvtpd_ps:
1410 ; SSE-NEXT: cvtpd2ps %xmm0, %xmm0 # encoding: [0x66,0x0f,0x5a,0xc0]
1411 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1413 ; AVX1-LABEL: test_mm_cvtpd_ps:
1415 ; AVX1-NEXT: vcvtpd2ps %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5a,0xc0]
1416 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1418 ; AVX512-LABEL: test_mm_cvtpd_ps:
1420 ; AVX512-NEXT: vcvtpd2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
1421 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1422 %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
1423 ret <4 x float> %res
1425 declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
1427 define <2 x i64> @test_mm_cvtps_epi32(<4 x float> %a0) nounwind {
1428 ; SSE-LABEL: test_mm_cvtps_epi32:
1430 ; SSE-NEXT: cvtps2dq %xmm0, %xmm0 # encoding: [0x66,0x0f,0x5b,0xc0]
1431 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1433 ; AVX1-LABEL: test_mm_cvtps_epi32:
1435 ; AVX1-NEXT: vcvtps2dq %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5b,0xc0]
1436 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1438 ; AVX512-LABEL: test_mm_cvtps_epi32:
1440 ; AVX512-NEXT: vcvtps2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5b,0xc0]
1441 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1442 %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
1443 %bc = bitcast <4 x i32> %res to <2 x i64>
1446 declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
1448 define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
1449 ; SSE-LABEL: test_mm_cvtps_pd:
1451 ; SSE-NEXT: cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
1452 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1454 ; AVX1-LABEL: test_mm_cvtps_pd:
1456 ; AVX1-NEXT: vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
1457 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1459 ; AVX512-LABEL: test_mm_cvtps_pd:
1461 ; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
1462 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1463 %ext = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
1464 %res = fpext <2 x float> %ext to <2 x double>
1465 ret <2 x double> %res
1468 define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
1469 ; X86-SSE-LABEL: test_mm_cvtsd_f64:
1471 ; X86-SSE-NEXT: pushl %ebp # encoding: [0x55]
1472 ; X86-SSE-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
1473 ; X86-SSE-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
1474 ; X86-SSE-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
1475 ; X86-SSE-NEXT: movlps %xmm0, (%esp) # encoding: [0x0f,0x13,0x04,0x24]
1476 ; X86-SSE-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
1477 ; X86-SSE-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
1478 ; X86-SSE-NEXT: popl %ebp # encoding: [0x5d]
1479 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1481 ; X86-AVX1-LABEL: test_mm_cvtsd_f64:
1482 ; X86-AVX1: # %bb.0:
1483 ; X86-AVX1-NEXT: pushl %ebp # encoding: [0x55]
1484 ; X86-AVX1-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
1485 ; X86-AVX1-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
1486 ; X86-AVX1-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
1487 ; X86-AVX1-NEXT: vmovlps %xmm0, (%esp) # encoding: [0xc5,0xf8,0x13,0x04,0x24]
1488 ; X86-AVX1-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
1489 ; X86-AVX1-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
1490 ; X86-AVX1-NEXT: popl %ebp # encoding: [0x5d]
1491 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1493 ; X86-AVX512-LABEL: test_mm_cvtsd_f64:
1494 ; X86-AVX512: # %bb.0:
1495 ; X86-AVX512-NEXT: pushl %ebp # encoding: [0x55]
1496 ; X86-AVX512-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
1497 ; X86-AVX512-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
1498 ; X86-AVX512-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
1499 ; X86-AVX512-NEXT: vmovlps %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x04,0x24]
1500 ; X86-AVX512-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
1501 ; X86-AVX512-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
1502 ; X86-AVX512-NEXT: popl %ebp # encoding: [0x5d]
1503 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1505 ; X64-LABEL: test_mm_cvtsd_f64:
1507 ; X64-NEXT: retq # encoding: [0xc3]
1509 ; X32-LABEL: test_mm_cvtsd_f64:
1511 ; X32-NEXT: retq # encoding: [0xc3]
1512 %res = extractelement <2 x double> %a0, i32 0
1516 define i32 @test_mm_cvtsd_si32(<2 x double> %a0) nounwind {
1517 ; SSE-LABEL: test_mm_cvtsd_si32:
1519 ; SSE-NEXT: cvtsd2si %xmm0, %eax # encoding: [0xf2,0x0f,0x2d,0xc0]
1520 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1522 ; AVX1-LABEL: test_mm_cvtsd_si32:
1524 ; AVX1-NEXT: vcvtsd2si %xmm0, %eax # encoding: [0xc5,0xfb,0x2d,0xc0]
1525 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1527 ; AVX512-LABEL: test_mm_cvtsd_si32:
1529 ; AVX512-NEXT: vcvtsd2si %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
1530 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1531 %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
1534 declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
1536 define <4 x float> @test_mm_cvtsd_ss(<4 x float> %a0, <2 x double> %a1) {
1537 ; SSE-LABEL: test_mm_cvtsd_ss:
1539 ; SSE-NEXT: cvtsd2ss %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5a,0xc1]
1540 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1542 ; AVX1-LABEL: test_mm_cvtsd_ss:
1544 ; AVX1-NEXT: vcvtsd2ss %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0xc1]
1545 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1547 ; AVX512-LABEL: test_mm_cvtsd_ss:
1549 ; AVX512-NEXT: vcvtsd2ss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0xc1]
1550 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1551 %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
1552 ret <4 x float> %res
1554 declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
1556 define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, <2 x double>* %p1) {
1557 ; X86-SSE-LABEL: test_mm_cvtsd_ss_load:
1559 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1560 ; X86-SSE-NEXT: cvtsd2ss (%eax), %xmm0 # encoding: [0xf2,0x0f,0x5a,0x00]
1561 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1563 ; X86-AVX1-LABEL: test_mm_cvtsd_ss_load:
1564 ; X86-AVX1: # %bb.0:
1565 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1566 ; X86-AVX1-NEXT: vcvtsd2ss (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0x00]
1567 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1569 ; X86-AVX512-LABEL: test_mm_cvtsd_ss_load:
1570 ; X86-AVX512: # %bb.0:
1571 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1572 ; X86-AVX512-NEXT: vcvtsd2ss (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0x00]
1573 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1575 ; X64-SSE-LABEL: test_mm_cvtsd_ss_load:
1577 ; X64-SSE-NEXT: cvtsd2ss (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x5a,0x07]
1578 ; X64-SSE-NEXT: retq # encoding: [0xc3]
1580 ; X64-AVX1-LABEL: test_mm_cvtsd_ss_load:
1581 ; X64-AVX1: # %bb.0:
1582 ; X64-AVX1-NEXT: vcvtsd2ss (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0x07]
1583 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
1585 ; X64-AVX512-LABEL: test_mm_cvtsd_ss_load:
1586 ; X64-AVX512: # %bb.0:
1587 ; X64-AVX512-NEXT: vcvtsd2ss (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0x07]
1588 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
1590 ; X32-SSE-LABEL: test_mm_cvtsd_ss_load:
1592 ; X32-SSE-NEXT: cvtsd2ss (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x5a,0x07]
1593 ; X32-SSE-NEXT: retq # encoding: [0xc3]
1595 ; X32-AVX1-LABEL: test_mm_cvtsd_ss_load:
1596 ; X32-AVX1: # %bb.0:
1597 ; X32-AVX1-NEXT: vcvtsd2ss (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xfb,0x5a,0x07]
1598 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
1600 ; X32-AVX512-LABEL: test_mm_cvtsd_ss_load:
1601 ; X32-AVX512: # %bb.0:
1602 ; X32-AVX512-NEXT: vcvtsd2ss (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x5a,0x07]
1603 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
1604 %a1 = load <2 x double>, <2 x double>* %p1
1605 %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
1606 ret <4 x float> %res
1609 define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
1610 ; SSE-LABEL: test_mm_cvtsi128_si32:
1612 ; SSE-NEXT: movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
1613 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1615 ; AVX1-LABEL: test_mm_cvtsi128_si32:
1617 ; AVX1-NEXT: vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
1618 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1620 ; AVX512-LABEL: test_mm_cvtsi128_si32:
1622 ; AVX512-NEXT: vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
1623 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1624 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
1625 %res = extractelement <4 x i32> %arg0, i32 0
1629 define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
1630 ; X86-SSE-LABEL: test_mm_cvtsi32_sd:
1632 ; X86-SSE-NEXT: cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
1633 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1635 ; X86-AVX1-LABEL: test_mm_cvtsi32_sd:
1636 ; X86-AVX1: # %bb.0:
1637 ; X86-AVX1-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
1638 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1640 ; X86-AVX512-LABEL: test_mm_cvtsi32_sd:
1641 ; X86-AVX512: # %bb.0:
1642 ; X86-AVX512-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
1643 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1645 ; X64-SSE-LABEL: test_mm_cvtsi32_sd:
1647 ; X64-SSE-NEXT: cvtsi2sd %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
1648 ; X64-SSE-NEXT: retq # encoding: [0xc3]
1650 ; X64-AVX1-LABEL: test_mm_cvtsi32_sd:
1651 ; X64-AVX1: # %bb.0:
1652 ; X64-AVX1-NEXT: vcvtsi2sd %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
1653 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
1655 ; X64-AVX512-LABEL: test_mm_cvtsi32_sd:
1656 ; X64-AVX512: # %bb.0:
1657 ; X64-AVX512-NEXT: vcvtsi2sd %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
1658 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
1660 ; X32-SSE-LABEL: test_mm_cvtsi32_sd:
1662 ; X32-SSE-NEXT: cvtsi2sd %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
1663 ; X32-SSE-NEXT: retq # encoding: [0xc3]
1665 ; X32-AVX1-LABEL: test_mm_cvtsi32_sd:
1666 ; X32-AVX1: # %bb.0:
1667 ; X32-AVX1-NEXT: vcvtsi2sd %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
1668 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
1670 ; X32-AVX512-LABEL: test_mm_cvtsi32_sd:
1671 ; X32-AVX512: # %bb.0:
1672 ; X32-AVX512-NEXT: vcvtsi2sd %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
1673 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
1674 %cvt = sitofp i32 %a1 to double
1675 %res = insertelement <2 x double> %a0, double %cvt, i32 0
1676 ret <2 x double> %res
1679 define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
1680 ; X86-SSE-LABEL: test_mm_cvtsi32_si128:
1682 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
1683 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
1684 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1686 ; X86-AVX1-LABEL: test_mm_cvtsi32_si128:
1687 ; X86-AVX1: # %bb.0:
1688 ; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
1689 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
1690 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1692 ; X86-AVX512-LABEL: test_mm_cvtsi32_si128:
1693 ; X86-AVX512: # %bb.0:
1694 ; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
1695 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
1696 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1698 ; X64-SSE-LABEL: test_mm_cvtsi32_si128:
1700 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
1701 ; X64-SSE-NEXT: retq # encoding: [0xc3]
1703 ; X64-AVX1-LABEL: test_mm_cvtsi32_si128:
1704 ; X64-AVX1: # %bb.0:
1705 ; X64-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
1706 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
1708 ; X64-AVX512-LABEL: test_mm_cvtsi32_si128:
1709 ; X64-AVX512: # %bb.0:
1710 ; X64-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
1711 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
1713 ; X32-SSE-LABEL: test_mm_cvtsi32_si128:
1715 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
1716 ; X32-SSE-NEXT: retq # encoding: [0xc3]
1718 ; X32-AVX1-LABEL: test_mm_cvtsi32_si128:
1719 ; X32-AVX1: # %bb.0:
1720 ; X32-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
1721 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
1723 ; X32-AVX512-LABEL: test_mm_cvtsi32_si128:
1724 ; X32-AVX512: # %bb.0:
1725 ; X32-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
1726 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
1727 %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
1728 %res1 = insertelement <4 x i32> %res0, i32 0, i32 1
1729 %res2 = insertelement <4 x i32> %res1, i32 0, i32 2
1730 %res3 = insertelement <4 x i32> %res2, i32 0, i32 3
1731 %res = bitcast <4 x i32> %res3 to <2 x i64>
1735 define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwind {
1736 ; SSE-LABEL: test_mm_cvtss_sd:
1738 ; SSE-NEXT: cvtss2sd %xmm1, %xmm0 # encoding: [0xf3,0x0f,0x5a,0xc1]
1739 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1741 ; AVX1-LABEL: test_mm_cvtss_sd:
1743 ; AVX1-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x5a,0xc1]
1744 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1746 ; AVX512-LABEL: test_mm_cvtss_sd:
1748 ; AVX512-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5a,0xc1]
1749 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1750 %ext = extractelement <4 x float> %a1, i32 0
1751 %cvt = fpext float %ext to double
1752 %res = insertelement <2 x double> %a0, double %cvt, i32 0
1753 ret <2 x double> %res
1756 define <2 x i64> @test_mm_cvttpd_epi32(<2 x double> %a0) nounwind {
1757 ; SSE-LABEL: test_mm_cvttpd_epi32:
1759 ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 # encoding: [0x66,0x0f,0xe6,0xc0]
1760 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1762 ; AVX1-LABEL: test_mm_cvttpd_epi32:
1764 ; AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe6,0xc0]
1765 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1767 ; AVX512-LABEL: test_mm_cvttpd_epi32:
1769 ; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
1770 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1771 %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
1772 %bc = bitcast <4 x i32> %res to <2 x i64>
1775 declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
1777 define <2 x i64> @test_mm_cvttps_epi32(<4 x float> %a0) nounwind {
1778 ; SSE-LABEL: test_mm_cvttps_epi32:
1780 ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x5b,0xc0]
1781 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1783 ; AVX1-LABEL: test_mm_cvttps_epi32:
1785 ; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x5b,0xc0]
1786 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1788 ; AVX512-LABEL: test_mm_cvttps_epi32:
1790 ; AVX512-NEXT: vcvttps2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
1791 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1792 %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0)
1793 %bc = bitcast <4 x i32> %res to <2 x i64>
1796 declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
1798 define i32 @test_mm_cvttsd_si32(<2 x double> %a0) nounwind {
1799 ; SSE-LABEL: test_mm_cvttsd_si32:
1801 ; SSE-NEXT: cvttsd2si %xmm0, %eax # encoding: [0xf2,0x0f,0x2c,0xc0]
1802 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1804 ; AVX1-LABEL: test_mm_cvttsd_si32:
1806 ; AVX1-NEXT: vcvttsd2si %xmm0, %eax # encoding: [0xc5,0xfb,0x2c,0xc0]
1807 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1809 ; AVX512-LABEL: test_mm_cvttsd_si32:
1811 ; AVX512-NEXT: vcvttsd2si %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
1812 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1813 %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
1816 declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
1818 define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
1819 ; SSE-LABEL: test_mm_div_pd:
1821 ; SSE-NEXT: divpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5e,0xc1]
1822 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1824 ; AVX1-LABEL: test_mm_div_pd:
1826 ; AVX1-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5e,0xc1]
1827 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1829 ; AVX512-LABEL: test_mm_div_pd:
1831 ; AVX512-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5e,0xc1]
1832 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1833 %res = fdiv <2 x double> %a0, %a1
1834 ret <2 x double> %res
1837 define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
1838 ; SSE-LABEL: test_mm_div_sd:
1840 ; SSE-NEXT: divsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5e,0xc1]
1841 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1843 ; AVX1-LABEL: test_mm_div_sd:
1845 ; AVX1-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5e,0xc1]
1846 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1848 ; AVX512-LABEL: test_mm_div_sd:
1850 ; AVX512-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5e,0xc1]
1851 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1852 %ext0 = extractelement <2 x double> %a0, i32 0
1853 %ext1 = extractelement <2 x double> %a1, i32 0
1854 %fdiv = fdiv double %ext0, %ext1
1855 %res = insertelement <2 x double> %a0, double %fdiv, i32 0
1856 ret <2 x double> %res
1859 define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
1860 ; SSE-LABEL: test_mm_extract_epi16:
1862 ; SSE-NEXT: pextrw $1, %xmm0, %eax # encoding: [0x66,0x0f,0xc5,0xc0,0x01]
1863 ; SSE-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
1864 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1866 ; AVX1-LABEL: test_mm_extract_epi16:
1868 ; AVX1-NEXT: vpextrw $1, %xmm0, %eax # encoding: [0xc5,0xf9,0xc5,0xc0,0x01]
1869 ; AVX1-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
1870 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1872 ; AVX512-LABEL: test_mm_extract_epi16:
1874 ; AVX512-NEXT: vpextrw $1, %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc0,0x01]
1875 ; AVX512-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
1876 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1877 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
1878 %ext = extractelement <8 x i16> %arg0, i32 1
1879 %res = zext i16 %ext to i32
1883 define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
1884 ; X86-SSE-LABEL: test_mm_insert_epi16:
1886 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
1887 ; X86-SSE-NEXT: pinsrw $1, %eax, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc0,0x01]
1888 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1890 ; X86-AVX1-LABEL: test_mm_insert_epi16:
1891 ; X86-AVX1: # %bb.0:
1892 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
1893 ; X86-AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
1894 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1896 ; X86-AVX512-LABEL: test_mm_insert_epi16:
1897 ; X86-AVX512: # %bb.0:
1898 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
1899 ; X86-AVX512-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
1900 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1902 ; X64-SSE-LABEL: test_mm_insert_epi16:
1904 ; X64-SSE-NEXT: pinsrw $1, %edi, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc7,0x01]
1905 ; X64-SSE-NEXT: retq # encoding: [0xc3]
1907 ; X64-AVX1-LABEL: test_mm_insert_epi16:
1908 ; X64-AVX1: # %bb.0:
1909 ; X64-AVX1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
1910 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
1912 ; X64-AVX512-LABEL: test_mm_insert_epi16:
1913 ; X64-AVX512: # %bb.0:
1914 ; X64-AVX512-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
1915 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
1917 ; X32-SSE-LABEL: test_mm_insert_epi16:
1919 ; X32-SSE-NEXT: pinsrw $1, %edi, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc7,0x01]
1920 ; X32-SSE-NEXT: retq # encoding: [0xc3]
1922 ; X32-AVX1-LABEL: test_mm_insert_epi16:
1923 ; X32-AVX1: # %bb.0:
1924 ; X32-AVX1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
1925 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
1927 ; X32-AVX512-LABEL: test_mm_insert_epi16:
1928 ; X32-AVX512: # %bb.0:
1929 ; X32-AVX512-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
1930 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
1931 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
1932 %res = insertelement <8 x i16> %arg0, i16 %a1,i32 1
1933 %bc = bitcast <8 x i16> %res to <2 x i64>
1937 define void @test_mm_lfence() nounwind {
1938 ; CHECK-LABEL: test_mm_lfence:
1940 ; CHECK-NEXT: lfence # encoding: [0x0f,0xae,0xe8]
1941 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
1942 call void @llvm.x86.sse2.lfence()
1945 declare void @llvm.x86.sse2.lfence() nounwind readnone
1947 define <2 x double> @test_mm_load_pd(double* %a0) nounwind {
1948 ; X86-SSE-LABEL: test_mm_load_pd:
1950 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1951 ; X86-SSE-NEXT: movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
1952 ; X86-SSE-NEXT: retl # encoding: [0xc3]
1954 ; X86-AVX1-LABEL: test_mm_load_pd:
1955 ; X86-AVX1: # %bb.0:
1956 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1957 ; X86-AVX1-NEXT: vmovaps (%eax), %xmm0 # encoding: [0xc5,0xf8,0x28,0x00]
1958 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
1960 ; X86-AVX512-LABEL: test_mm_load_pd:
1961 ; X86-AVX512: # %bb.0:
1962 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1963 ; X86-AVX512-NEXT: vmovaps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
1964 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
1966 ; X64-SSE-LABEL: test_mm_load_pd:
1968 ; X64-SSE-NEXT: movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
1969 ; X64-SSE-NEXT: retq # encoding: [0xc3]
1971 ; X64-AVX1-LABEL: test_mm_load_pd:
1972 ; X64-AVX1: # %bb.0:
1973 ; X64-AVX1-NEXT: vmovaps (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x28,0x07]
1974 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
1976 ; X64-AVX512-LABEL: test_mm_load_pd:
1977 ; X64-AVX512: # %bb.0:
1978 ; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
1979 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
1981 ; X32-SSE-LABEL: test_mm_load_pd:
1983 ; X32-SSE-NEXT: movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
1984 ; X32-SSE-NEXT: retq # encoding: [0xc3]
1986 ; X32-AVX1-LABEL: test_mm_load_pd:
1987 ; X32-AVX1: # %bb.0:
1988 ; X32-AVX1-NEXT: vmovaps (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x28,0x07]
1989 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
1991 ; X32-AVX512-LABEL: test_mm_load_pd:
1992 ; X32-AVX512: # %bb.0:
1993 ; X32-AVX512-NEXT: vmovaps (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x28,0x07]
1994 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
1995 %arg0 = bitcast double* %a0 to <2 x double>*
1996 %res = load <2 x double>, <2 x double>* %arg0, align 16
1997 ret <2 x double> %res
2000 define <2 x double> @test_mm_load_sd(double* %a0) nounwind {
2001 ; X86-SSE-LABEL: test_mm_load_sd:
2003 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2004 ; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
2005 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
2006 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2008 ; X86-AVX1-LABEL: test_mm_load_sd:
2009 ; X86-AVX1: # %bb.0:
2010 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2011 ; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
2012 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
2013 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2015 ; X86-AVX512-LABEL: test_mm_load_sd:
2016 ; X86-AVX512: # %bb.0:
2017 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2018 ; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
2019 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
2020 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2022 ; X64-SSE-LABEL: test_mm_load_sd:
2024 ; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
2025 ; X64-SSE-NEXT: # xmm0 = mem[0],zero
2026 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2028 ; X64-AVX1-LABEL: test_mm_load_sd:
2029 ; X64-AVX1: # %bb.0:
2030 ; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
2031 ; X64-AVX1-NEXT: # xmm0 = mem[0],zero
2032 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2034 ; X64-AVX512-LABEL: test_mm_load_sd:
2035 ; X64-AVX512: # %bb.0:
2036 ; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
2037 ; X64-AVX512-NEXT: # xmm0 = mem[0],zero
2038 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2040 ; X32-SSE-LABEL: test_mm_load_sd:
2042 ; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
2043 ; X32-SSE-NEXT: # xmm0 = mem[0],zero
2044 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2046 ; X32-AVX1-LABEL: test_mm_load_sd:
2047 ; X32-AVX1: # %bb.0:
2048 ; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
2049 ; X32-AVX1-NEXT: # xmm0 = mem[0],zero
2050 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2052 ; X32-AVX512-LABEL: test_mm_load_sd:
2053 ; X32-AVX512: # %bb.0:
2054 ; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
2055 ; X32-AVX512-NEXT: # xmm0 = mem[0],zero
2056 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2057 %ld = load double, double* %a0, align 1
2058 %res0 = insertelement <2 x double> undef, double %ld, i32 0
2059 %res1 = insertelement <2 x double> %res0, double 0.0, i32 1
2060 ret <2 x double> %res1
2063 define <2 x i64> @test_mm_load_si128(<2 x i64>* %a0) nounwind {
2064 ; X86-SSE-LABEL: test_mm_load_si128:
2066 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2067 ; X86-SSE-NEXT: movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
2068 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2070 ; X86-AVX1-LABEL: test_mm_load_si128:
2071 ; X86-AVX1: # %bb.0:
2072 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2073 ; X86-AVX1-NEXT: vmovaps (%eax), %xmm0 # encoding: [0xc5,0xf8,0x28,0x00]
2074 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2076 ; X86-AVX512-LABEL: test_mm_load_si128:
2077 ; X86-AVX512: # %bb.0:
2078 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2079 ; X86-AVX512-NEXT: vmovaps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
2080 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2082 ; X64-SSE-LABEL: test_mm_load_si128:
2084 ; X64-SSE-NEXT: movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
2085 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2087 ; X64-AVX1-LABEL: test_mm_load_si128:
2088 ; X64-AVX1: # %bb.0:
2089 ; X64-AVX1-NEXT: vmovaps (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x28,0x07]
2090 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2092 ; X64-AVX512-LABEL: test_mm_load_si128:
2093 ; X64-AVX512: # %bb.0:
2094 ; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
2095 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2097 ; X32-SSE-LABEL: test_mm_load_si128:
2099 ; X32-SSE-NEXT: movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
2100 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2102 ; X32-AVX1-LABEL: test_mm_load_si128:
2103 ; X32-AVX1: # %bb.0:
2104 ; X32-AVX1-NEXT: vmovaps (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x28,0x07]
2105 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2107 ; X32-AVX512-LABEL: test_mm_load_si128:
2108 ; X32-AVX512: # %bb.0:
2109 ; X32-AVX512-NEXT: vmovaps (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x28,0x07]
2110 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2111 %res = load <2 x i64>, <2 x i64>* %a0, align 16
2115 define <2 x double> @test_mm_load1_pd(double* %a0) nounwind {
2116 ; X86-SSE-LABEL: test_mm_load1_pd:
2118 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2119 ; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
2120 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
2121 ; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
2122 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
2123 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2125 ; X86-AVX1-LABEL: test_mm_load1_pd:
2126 ; X86-AVX1: # %bb.0:
2127 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2128 ; X86-AVX1-NEXT: vmovddup (%eax), %xmm0 # encoding: [0xc5,0xfb,0x12,0x00]
2129 ; X86-AVX1-NEXT: # xmm0 = mem[0,0]
2130 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2132 ; X86-AVX512-LABEL: test_mm_load1_pd:
2133 ; X86-AVX512: # %bb.0:
2134 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2135 ; X86-AVX512-NEXT: vmovddup (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x00]
2136 ; X86-AVX512-NEXT: # xmm0 = mem[0,0]
2137 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2139 ; X64-SSE-LABEL: test_mm_load1_pd:
2141 ; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
2142 ; X64-SSE-NEXT: # xmm0 = mem[0],zero
2143 ; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
2144 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
2145 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2147 ; X64-AVX1-LABEL: test_mm_load1_pd:
2148 ; X64-AVX1: # %bb.0:
2149 ; X64-AVX1-NEXT: vmovddup (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x12,0x07]
2150 ; X64-AVX1-NEXT: # xmm0 = mem[0,0]
2151 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2153 ; X64-AVX512-LABEL: test_mm_load1_pd:
2154 ; X64-AVX512: # %bb.0:
2155 ; X64-AVX512-NEXT: vmovddup (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x07]
2156 ; X64-AVX512-NEXT: # xmm0 = mem[0,0]
2157 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2159 ; X32-SSE-LABEL: test_mm_load1_pd:
2161 ; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
2162 ; X32-SSE-NEXT: # xmm0 = mem[0],zero
2163 ; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
2164 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
2165 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2167 ; X32-AVX1-LABEL: test_mm_load1_pd:
2168 ; X32-AVX1: # %bb.0:
2169 ; X32-AVX1-NEXT: vmovddup (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x12,0x07]
2170 ; X32-AVX1-NEXT: # xmm0 = mem[0,0]
2171 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2173 ; X32-AVX512-LABEL: test_mm_load1_pd:
2174 ; X32-AVX512: # %bb.0:
2175 ; X32-AVX512-NEXT: vmovddup (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x12,0x07]
2176 ; X32-AVX512-NEXT: # xmm0 = mem[0,0]
2177 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2178 %ld = load double, double* %a0, align 8
2179 %res0 = insertelement <2 x double> undef, double %ld, i32 0
2180 %res1 = insertelement <2 x double> %res0, double %ld, i32 1
2181 ret <2 x double> %res1
2184 define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, double* %a1) nounwind {
2185 ; X86-SSE-LABEL: test_mm_loadh_pd:
2187 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2188 ; X86-SSE-NEXT: movhps (%eax), %xmm0 # encoding: [0x0f,0x16,0x00]
2189 ; X86-SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2190 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2192 ; X86-AVX1-LABEL: test_mm_loadh_pd:
2193 ; X86-AVX1: # %bb.0:
2194 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2195 ; X86-AVX1-NEXT: vmovhps (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0x00]
2196 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2197 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2199 ; X86-AVX512-LABEL: test_mm_loadh_pd:
2200 ; X86-AVX512: # %bb.0:
2201 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2202 ; X86-AVX512-NEXT: vmovhps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0x00]
2203 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2204 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2206 ; X64-SSE-LABEL: test_mm_loadh_pd:
2208 ; X64-SSE-NEXT: movhps (%rdi), %xmm0 # encoding: [0x0f,0x16,0x07]
2209 ; X64-SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2210 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2212 ; X64-AVX1-LABEL: test_mm_loadh_pd:
2213 ; X64-AVX1: # %bb.0:
2214 ; X64-AVX1-NEXT: vmovhps (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0x07]
2215 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2216 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2218 ; X64-AVX512-LABEL: test_mm_loadh_pd:
2219 ; X64-AVX512: # %bb.0:
2220 ; X64-AVX512-NEXT: vmovhps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0x07]
2221 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2222 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2224 ; X32-SSE-LABEL: test_mm_loadh_pd:
2226 ; X32-SSE-NEXT: movhps (%edi), %xmm0 # encoding: [0x67,0x0f,0x16,0x07]
2227 ; X32-SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2228 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2230 ; X32-AVX1-LABEL: test_mm_loadh_pd:
2231 ; X32-AVX1: # %bb.0:
2232 ; X32-AVX1-NEXT: vmovhps (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xf8,0x16,0x07]
2233 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2234 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2236 ; X32-AVX512-LABEL: test_mm_loadh_pd:
2237 ; X32-AVX512: # %bb.0:
2238 ; X32-AVX512-NEXT: vmovhps (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x16,0x07]
2239 ; X32-AVX512-NEXT: # xmm0 = xmm0[0,1],mem[0,1]
2240 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2241 %ld = load double, double* %a1, align 8
2242 %res = insertelement <2 x double> %a0, double %ld, i32 1
2243 ret <2 x double> %res
2246 define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
2247 ; X86-SSE-LABEL: test_mm_loadl_epi64:
2249 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2250 ; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
2251 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
2252 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2254 ; X86-AVX1-LABEL: test_mm_loadl_epi64:
2255 ; X86-AVX1: # %bb.0:
2256 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2257 ; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
2258 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
2259 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2261 ; X86-AVX512-LABEL: test_mm_loadl_epi64:
2262 ; X86-AVX512: # %bb.0:
2263 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2264 ; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
2265 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
2266 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2268 ; X64-SSE-LABEL: test_mm_loadl_epi64:
2270 ; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
2271 ; X64-SSE-NEXT: # xmm0 = mem[0],zero
2272 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2274 ; X64-AVX1-LABEL: test_mm_loadl_epi64:
2275 ; X64-AVX1: # %bb.0:
2276 ; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
2277 ; X64-AVX1-NEXT: # xmm0 = mem[0],zero
2278 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2280 ; X64-AVX512-LABEL: test_mm_loadl_epi64:
2281 ; X64-AVX512: # %bb.0:
2282 ; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
2283 ; X64-AVX512-NEXT: # xmm0 = mem[0],zero
2284 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2286 ; X32-SSE-LABEL: test_mm_loadl_epi64:
2288 ; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
2289 ; X32-SSE-NEXT: # xmm0 = mem[0],zero
2290 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2292 ; X32-AVX1-LABEL: test_mm_loadl_epi64:
2293 ; X32-AVX1: # %bb.0:
2294 ; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
2295 ; X32-AVX1-NEXT: # xmm0 = mem[0],zero
2296 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2298 ; X32-AVX512-LABEL: test_mm_loadl_epi64:
2299 ; X32-AVX512: # %bb.0:
2300 ; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
2301 ; X32-AVX512-NEXT: # xmm0 = mem[0],zero
2302 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2303 %bc = bitcast <2 x i64>* %a1 to i64*
2304 %ld = load i64, i64* %bc, align 1
2305 %res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
2306 %res1 = insertelement <2 x i64> %res0, i64 0, i32 1
2310 define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, double* %a1) nounwind {
2311 ; X86-SSE-LABEL: test_mm_loadl_pd:
2313 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2314 ; X86-SSE-NEXT: movlps (%eax), %xmm0 # encoding: [0x0f,0x12,0x00]
2315 ; X86-SSE-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2316 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2318 ; X86-AVX1-LABEL: test_mm_loadl_pd:
2319 ; X86-AVX1: # %bb.0:
2320 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2321 ; X86-AVX1-NEXT: vmovlps (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x12,0x00]
2322 ; X86-AVX1-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2323 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2325 ; X86-AVX512-LABEL: test_mm_loadl_pd:
2326 ; X86-AVX512: # %bb.0:
2327 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2328 ; X86-AVX512-NEXT: vmovlps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x12,0x00]
2329 ; X86-AVX512-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2330 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2332 ; X64-SSE-LABEL: test_mm_loadl_pd:
2334 ; X64-SSE-NEXT: movlps (%rdi), %xmm0 # encoding: [0x0f,0x12,0x07]
2335 ; X64-SSE-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2336 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2338 ; X64-AVX1-LABEL: test_mm_loadl_pd:
2339 ; X64-AVX1: # %bb.0:
2340 ; X64-AVX1-NEXT: vmovlps (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x12,0x07]
2341 ; X64-AVX1-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2342 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2344 ; X64-AVX512-LABEL: test_mm_loadl_pd:
2345 ; X64-AVX512: # %bb.0:
2346 ; X64-AVX512-NEXT: vmovlps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x12,0x07]
2347 ; X64-AVX512-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2348 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2350 ; X32-SSE-LABEL: test_mm_loadl_pd:
2352 ; X32-SSE-NEXT: movlps (%edi), %xmm0 # encoding: [0x67,0x0f,0x12,0x07]
2353 ; X32-SSE-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2354 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2356 ; X32-AVX1-LABEL: test_mm_loadl_pd:
2357 ; X32-AVX1: # %bb.0:
2358 ; X32-AVX1-NEXT: vmovlps (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xf8,0x12,0x07]
2359 ; X32-AVX1-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2360 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2362 ; X32-AVX512-LABEL: test_mm_loadl_pd:
2363 ; X32-AVX512: # %bb.0:
2364 ; X32-AVX512-NEXT: vmovlps (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x12,0x07]
2365 ; X32-AVX512-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2366 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2367 %ld = load double, double* %a1, align 8
2368 %res = insertelement <2 x double> %a0, double %ld, i32 0
2369 ret <2 x double> %res
; _mm_loadr_pd: aligned 16-byte load plus a lane-reversing shuffle. SSE uses
; movaps + shufps $78 (0x4E selects elements [2,3,0,1]); AVX1/AVX512 fold the
; load into vpermilpd $1 (mem[1,0]).
2372 define <2 x double> @test_mm_loadr_pd(double* %a0) nounwind {
2373 ; X86-SSE-LABEL: test_mm_loadr_pd:
2375 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2376 ; X86-SSE-NEXT: movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
2377 ; X86-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
2378 ; X86-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
2379 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2381 ; X86-AVX1-LABEL: test_mm_loadr_pd:
2382 ; X86-AVX1: # %bb.0:
2383 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2384 ; X86-AVX1-NEXT: vpermilpd $1, (%eax), %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0x00,0x01]
2385 ; X86-AVX1-NEXT: # xmm0 = mem[1,0]
2386 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2388 ; X86-AVX512-LABEL: test_mm_loadr_pd:
2389 ; X86-AVX512: # %bb.0:
2390 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2391 ; X86-AVX512-NEXT: vpermilpd $1, (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0x00,0x01]
2392 ; X86-AVX512-NEXT: # xmm0 = mem[1,0]
2393 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2395 ; X64-SSE-LABEL: test_mm_loadr_pd:
2397 ; X64-SSE-NEXT: movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
2398 ; X64-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
2399 ; X64-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
2400 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2402 ; X64-AVX1-LABEL: test_mm_loadr_pd:
2403 ; X64-AVX1: # %bb.0:
2404 ; X64-AVX1-NEXT: vpermilpd $1, (%rdi), %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0x07,0x01]
2405 ; X64-AVX1-NEXT: # xmm0 = mem[1,0]
2406 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2408 ; X64-AVX512-LABEL: test_mm_loadr_pd:
2409 ; X64-AVX512: # %bb.0:
2410 ; X64-AVX512-NEXT: vpermilpd $1, (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0x07,0x01]
2411 ; X64-AVX512-NEXT: # xmm0 = mem[1,0]
2412 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2414 ; X32-SSE-LABEL: test_mm_loadr_pd:
2416 ; X32-SSE-NEXT: movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
2417 ; X32-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
2418 ; X32-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
2419 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2421 ; X32-AVX1-LABEL: test_mm_loadr_pd:
2422 ; X32-AVX1: # %bb.0:
2423 ; X32-AVX1-NEXT: vpermilpd $1, (%edi), %xmm0 # encoding: [0x67,0xc4,0xe3,0x79,0x05,0x07,0x01]
2424 ; X32-AVX1-NEXT: # xmm0 = mem[1,0]
2425 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2427 ; X32-AVX512-LABEL: test_mm_loadr_pd:
2428 ; X32-AVX512: # %bb.0:
2429 ; X32-AVX512-NEXT: vpermilpd $1, (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc4,0xe3,0x79,0x05,0x07,0x01]
2430 ; X32-AVX512-NEXT: # xmm0 = mem[1,0]
2431 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2432 %arg0 = bitcast double* %a0 to <2 x double>*
2433 %ld = load <2 x double>, <2 x double>* %arg0, align 16
2434 %res = shufflevector <2 x double> %ld, <2 x double> undef, <2 x i32> <i32 1, i32 0>
2435 ret <2 x double> %res
; _mm_loadu_pd: unaligned (align 1) <2 x double> load must select the
; unaligned movups/vmovups form, never movaps.
2438 define <2 x double> @test_mm_loadu_pd(double* %a0) nounwind {
2439 ; X86-SSE-LABEL: test_mm_loadu_pd:
2441 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2442 ; X86-SSE-NEXT: movups (%eax), %xmm0 # encoding: [0x0f,0x10,0x00]
2443 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2445 ; X86-AVX1-LABEL: test_mm_loadu_pd:
2446 ; X86-AVX1: # %bb.0:
2447 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2448 ; X86-AVX1-NEXT: vmovups (%eax), %xmm0 # encoding: [0xc5,0xf8,0x10,0x00]
2449 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2451 ; X86-AVX512-LABEL: test_mm_loadu_pd:
2452 ; X86-AVX512: # %bb.0:
2453 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2454 ; X86-AVX512-NEXT: vmovups (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x00]
2455 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2457 ; X64-SSE-LABEL: test_mm_loadu_pd:
2459 ; X64-SSE-NEXT: movups (%rdi), %xmm0 # encoding: [0x0f,0x10,0x07]
2460 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2462 ; X64-AVX1-LABEL: test_mm_loadu_pd:
2463 ; X64-AVX1: # %bb.0:
2464 ; X64-AVX1-NEXT: vmovups (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x10,0x07]
2465 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2467 ; X64-AVX512-LABEL: test_mm_loadu_pd:
2468 ; X64-AVX512: # %bb.0:
2469 ; X64-AVX512-NEXT: vmovups (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
2470 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2472 ; X32-SSE-LABEL: test_mm_loadu_pd:
2474 ; X32-SSE-NEXT: movups (%edi), %xmm0 # encoding: [0x67,0x0f,0x10,0x07]
2475 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2477 ; X32-AVX1-LABEL: test_mm_loadu_pd:
2478 ; X32-AVX1: # %bb.0:
2479 ; X32-AVX1-NEXT: vmovups (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x10,0x07]
2480 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2482 ; X32-AVX512-LABEL: test_mm_loadu_pd:
2483 ; X32-AVX512: # %bb.0:
2484 ; X32-AVX512-NEXT: vmovups (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x10,0x07]
2485 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2486 %arg0 = bitcast double* %a0 to <2 x double>*
2487 %res = load <2 x double>, <2 x double>* %arg0, align 1
2488 ret <2 x double> %res
; _mm_loadu_si128: unaligned (align 1) <2 x i64> load; lowering reuses the
; FP unaligned move (movups/vmovups), same codegen as _mm_loadu_pd above.
2491 define <2 x i64> @test_mm_loadu_si128(<2 x i64>* %a0) nounwind {
2492 ; X86-SSE-LABEL: test_mm_loadu_si128:
2494 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2495 ; X86-SSE-NEXT: movups (%eax), %xmm0 # encoding: [0x0f,0x10,0x00]
2496 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2498 ; X86-AVX1-LABEL: test_mm_loadu_si128:
2499 ; X86-AVX1: # %bb.0:
2500 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2501 ; X86-AVX1-NEXT: vmovups (%eax), %xmm0 # encoding: [0xc5,0xf8,0x10,0x00]
2502 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2504 ; X86-AVX512-LABEL: test_mm_loadu_si128:
2505 ; X86-AVX512: # %bb.0:
2506 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2507 ; X86-AVX512-NEXT: vmovups (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x00]
2508 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2510 ; X64-SSE-LABEL: test_mm_loadu_si128:
2512 ; X64-SSE-NEXT: movups (%rdi), %xmm0 # encoding: [0x0f,0x10,0x07]
2513 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2515 ; X64-AVX1-LABEL: test_mm_loadu_si128:
2516 ; X64-AVX1: # %bb.0:
2517 ; X64-AVX1-NEXT: vmovups (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x10,0x07]
2518 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2520 ; X64-AVX512-LABEL: test_mm_loadu_si128:
2521 ; X64-AVX512: # %bb.0:
2522 ; X64-AVX512-NEXT: vmovups (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
2523 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2525 ; X32-SSE-LABEL: test_mm_loadu_si128:
2527 ; X32-SSE-NEXT: movups (%edi), %xmm0 # encoding: [0x67,0x0f,0x10,0x07]
2528 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2530 ; X32-AVX1-LABEL: test_mm_loadu_si128:
2531 ; X32-AVX1: # %bb.0:
2532 ; X32-AVX1-NEXT: vmovups (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x10,0x07]
2533 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2535 ; X32-AVX512-LABEL: test_mm_loadu_si128:
2536 ; X32-AVX512: # %bb.0:
2537 ; X32-AVX512-NEXT: vmovups (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x10,0x07]
2538 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2539 %res = load <2 x i64>, <2 x i64>* %a0, align 1
; _mm_loadu_si64: unaligned i64 load zero-extended into the low lane,
; lowered to movsd/vmovsd (xmm0 = mem[0],zero).
2543 define <2 x i64> @test_mm_loadu_si64(i8* nocapture readonly %A) {
2544 ; X86-SSE-LABEL: test_mm_loadu_si64:
2545 ; X86-SSE: # %bb.0: # %entry
2546 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2547 ; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
2548 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
2549 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2551 ; X86-AVX1-LABEL: test_mm_loadu_si64:
2552 ; X86-AVX1: # %bb.0: # %entry
2553 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2554 ; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
2555 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
2556 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2558 ; X86-AVX512-LABEL: test_mm_loadu_si64:
2559 ; X86-AVX512: # %bb.0: # %entry
2560 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2561 ; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
2562 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
2563 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2565 ; X64-SSE-LABEL: test_mm_loadu_si64:
2566 ; X64-SSE: # %bb.0: # %entry
2567 ; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
2568 ; X64-SSE-NEXT: # xmm0 = mem[0],zero
2569 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2571 ; X64-AVX1-LABEL: test_mm_loadu_si64:
2572 ; X64-AVX1: # %bb.0: # %entry
2573 ; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
2574 ; X64-AVX1-NEXT: # xmm0 = mem[0],zero
2575 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2577 ; X64-AVX512-LABEL: test_mm_loadu_si64:
2578 ; X64-AVX512: # %bb.0: # %entry
2579 ; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
2580 ; X64-AVX512-NEXT: # xmm0 = mem[0],zero
2581 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2583 ; X32-SSE-LABEL: test_mm_loadu_si64:
2584 ; X32-SSE: # %bb.0: # %entry
2585 ; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
2586 ; X32-SSE-NEXT: # xmm0 = mem[0],zero
2587 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2589 ; X32-AVX1-LABEL: test_mm_loadu_si64:
2590 ; X32-AVX1: # %bb.0: # %entry
2591 ; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
2592 ; X32-AVX1-NEXT: # xmm0 = mem[0],zero
2593 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2595 ; X32-AVX512-LABEL: test_mm_loadu_si64:
2596 ; X32-AVX512: # %bb.0: # %entry
2597 ; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
2598 ; X32-AVX512-NEXT: # xmm0 = mem[0],zero
2599 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2601 %__v.i = bitcast i8* %A to i64*
2602 %0 = load i64, i64* %__v.i, align 1
2603 %vecinit1.i = insertelement <2 x i64> <i64 undef, i64 0>, i64 %0, i32 0
2604 ret <2 x i64> %vecinit1.i
; _mm_loadu_si32: unaligned i32 load zero-extended into the low lane,
; lowered to movss/vmovss (xmm0 = mem[0],zero,zero,zero).
2607 define <2 x i64> @test_mm_loadu_si32(i8* nocapture readonly %A) {
2608 ; X86-SSE-LABEL: test_mm_loadu_si32:
2609 ; X86-SSE: # %bb.0: # %entry
2610 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2611 ; X86-SSE-NEXT: movss (%eax), %xmm0 # encoding: [0xf3,0x0f,0x10,0x00]
2612 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
2613 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2615 ; X86-AVX1-LABEL: test_mm_loadu_si32:
2616 ; X86-AVX1: # %bb.0: # %entry
2617 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2618 ; X86-AVX1-NEXT: vmovss (%eax), %xmm0 # encoding: [0xc5,0xfa,0x10,0x00]
2619 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
2620 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2622 ; X86-AVX512-LABEL: test_mm_loadu_si32:
2623 ; X86-AVX512: # %bb.0: # %entry
2624 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2625 ; X86-AVX512-NEXT: vmovss (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
2626 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
2627 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2629 ; X64-SSE-LABEL: test_mm_loadu_si32:
2630 ; X64-SSE: # %bb.0: # %entry
2631 ; X64-SSE-NEXT: movss (%rdi), %xmm0 # encoding: [0xf3,0x0f,0x10,0x07]
2632 ; X64-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
2633 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2635 ; X64-AVX1-LABEL: test_mm_loadu_si32:
2636 ; X64-AVX1: # %bb.0: # %entry
2637 ; X64-AVX1-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
2638 ; X64-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
2639 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2641 ; X64-AVX512-LABEL: test_mm_loadu_si32:
2642 ; X64-AVX512: # %bb.0: # %entry
2643 ; X64-AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
2644 ; X64-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
2645 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2647 ; X32-SSE-LABEL: test_mm_loadu_si32:
2648 ; X32-SSE: # %bb.0: # %entry
2649 ; X32-SSE-NEXT: movss (%edi), %xmm0 # encoding: [0x67,0xf3,0x0f,0x10,0x07]
2650 ; X32-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
2651 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2653 ; X32-AVX1-LABEL: test_mm_loadu_si32:
2654 ; X32-AVX1: # %bb.0: # %entry
2655 ; X32-AVX1-NEXT: vmovss (%edi), %xmm0 # encoding: [0x67,0xc5,0xfa,0x10,0x07]
2656 ; X32-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
2657 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2659 ; X32-AVX512-LABEL: test_mm_loadu_si32:
2660 ; X32-AVX512: # %bb.0: # %entry
2661 ; X32-AVX512-NEXT: vmovss (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfa,0x10,0x07]
2662 ; X32-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
2663 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2665 %__v.i = bitcast i8* %A to i32*
2666 %0 = load i32, i32* %__v.i, align 1
2667 %vecinit3.i = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %0, i32 0
2668 %1 = bitcast <4 x i32> %vecinit3.i to <2 x i64>
; _mm_loadu_si16: unaligned i16 load zero-extended into the low lane; no
; 16-bit vector load exists, so it goes through movzwl + movd/vmovd.
2672 define <2 x i64> @test_mm_loadu_si16(i8* nocapture readonly %A) {
2673 ; X86-SSE-LABEL: test_mm_loadu_si16:
2674 ; X86-SSE: # %bb.0: # %entry
2675 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2676 ; X86-SSE-NEXT: movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
2677 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
2678 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2680 ; X86-AVX1-LABEL: test_mm_loadu_si16:
2681 ; X86-AVX1: # %bb.0: # %entry
2682 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2683 ; X86-AVX1-NEXT: movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
2684 ; X86-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
2685 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
2687 ; X86-AVX512-LABEL: test_mm_loadu_si16:
2688 ; X86-AVX512: # %bb.0: # %entry
2689 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
2690 ; X86-AVX512-NEXT: movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
2691 ; X86-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
2692 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
2694 ; X64-SSE-LABEL: test_mm_loadu_si16:
2695 ; X64-SSE: # %bb.0: # %entry
2696 ; X64-SSE-NEXT: movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
2697 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
2698 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2700 ; X64-AVX1-LABEL: test_mm_loadu_si16:
2701 ; X64-AVX1: # %bb.0: # %entry
2702 ; X64-AVX1-NEXT: movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
2703 ; X64-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
2704 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
2706 ; X64-AVX512-LABEL: test_mm_loadu_si16:
2707 ; X64-AVX512: # %bb.0: # %entry
2708 ; X64-AVX512-NEXT: movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
2709 ; X64-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
2710 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
2712 ; X32-SSE-LABEL: test_mm_loadu_si16:
2713 ; X32-SSE: # %bb.0: # %entry
2714 ; X32-SSE-NEXT: movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
2715 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
2716 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2718 ; X32-AVX1-LABEL: test_mm_loadu_si16:
2719 ; X32-AVX1: # %bb.0: # %entry
2720 ; X32-AVX1-NEXT: movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
2721 ; X32-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
2722 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
2724 ; X32-AVX512-LABEL: test_mm_loadu_si16:
2725 ; X32-AVX512: # %bb.0: # %entry
2726 ; X32-AVX512-NEXT: movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
2727 ; X32-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
2728 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
2730 %__v.i = bitcast i8* %A to i16*
2731 %0 = load i16, i16* %__v.i, align 1
2732 %vecinit7.i = insertelement <8 x i16> <i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, i16 %0, i32 0
2733 %1 = bitcast <8 x i16> %vecinit7.i to <2 x i64>
; _mm_madd_epi16 -> llvm.x86.sse2.pmadd.wd -> pmaddwd/vpmaddwd.
2737 define <2 x i64> @test_mm_madd_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
2738 ; SSE-LABEL: test_mm_madd_epi16:
2740 ; SSE-NEXT: pmaddwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf5,0xc1]
2741 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2743 ; AVX1-LABEL: test_mm_madd_epi16:
2745 ; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf5,0xc1]
2746 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2748 ; AVX512-LABEL: test_mm_madd_epi16:
2750 ; AVX512-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
2751 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2752 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
2753 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
2754 %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %arg0, <8 x i16> %arg1)
2755 %bc = bitcast <4 x i32> %res to <2 x i64>
2758 declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
; _mm_maskmoveu_si128 -> llvm.x86.sse2.maskmov.dqu -> (v)maskmovdqu, which
; implicitly addresses through edi/rdi, hence the push/pop of %edi on X86
; and the addr32 (0x67) prefix + edi kill on x32.
2760 define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, i8* %a2) nounwind {
2761 ; X86-SSE-LABEL: test_mm_maskmoveu_si128:
2763 ; X86-SSE-NEXT: pushl %edi # encoding: [0x57]
2764 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi # encoding: [0x8b,0x7c,0x24,0x08]
2765 ; X86-SSE-NEXT: maskmovdqu %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf7,0xc1]
2766 ; X86-SSE-NEXT: popl %edi # encoding: [0x5f]
2767 ; X86-SSE-NEXT: retl # encoding: [0xc3]
2769 ; X86-AVX-LABEL: test_mm_maskmoveu_si128:
2771 ; X86-AVX-NEXT: pushl %edi # encoding: [0x57]
2772 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edi # encoding: [0x8b,0x7c,0x24,0x08]
2773 ; X86-AVX-NEXT: vmaskmovdqu %xmm1, %xmm0 # encoding: [0xc5,0xf9,0xf7,0xc1]
2774 ; X86-AVX-NEXT: popl %edi # encoding: [0x5f]
2775 ; X86-AVX-NEXT: retl # encoding: [0xc3]
2777 ; X64-SSE-LABEL: test_mm_maskmoveu_si128:
2779 ; X64-SSE-NEXT: maskmovdqu %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf7,0xc1]
2780 ; X64-SSE-NEXT: retq # encoding: [0xc3]
2782 ; X64-AVX-LABEL: test_mm_maskmoveu_si128:
2784 ; X64-AVX-NEXT: vmaskmovdqu %xmm1, %xmm0 # encoding: [0xc5,0xf9,0xf7,0xc1]
2785 ; X64-AVX-NEXT: retq # encoding: [0xc3]
2787 ; X32-SSE-LABEL: test_mm_maskmoveu_si128:
2789 ; X32-SSE-NEXT: # kill: def $edi killed $edi killed $rdi
2790 ; X32-SSE-NEXT: addr32 maskmovdqu %xmm1, %xmm0 # encoding: [0x67,0x66,0x0f,0xf7,0xc1]
2791 ; X32-SSE-NEXT: retq # encoding: [0xc3]
2793 ; X32-AVX-LABEL: test_mm_maskmoveu_si128:
2795 ; X32-AVX-NEXT: # kill: def $edi killed $edi killed $rdi
2796 ; X32-AVX-NEXT: addr32 vmaskmovdqu %xmm1, %xmm0 # encoding: [0x67,0xc5,0xf9,0xf7,0xc1]
2797 ; X32-AVX-NEXT: retq # encoding: [0xc3]
2798 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
2799 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
2800 call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %arg0, <16 x i8> %arg1, i8* %a2)
2803 declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind
; _mm_max_epi16 via the generic llvm.smax.v8i16 -> pmaxsw/vpmaxsw.
2805 define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
2806 ; SSE-LABEL: test_mm_max_epi16:
2808 ; SSE-NEXT: pmaxsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xee,0xc1]
2809 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2811 ; AVX1-LABEL: test_mm_max_epi16:
2813 ; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xee,0xc1]
2814 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2816 ; AVX512-LABEL: test_mm_max_epi16:
2818 ; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
2819 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2820 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
2821 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
2822 %sel = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
2823 %bc = bitcast <8 x i16> %sel to <2 x i64>
2826 declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
; _mm_max_epu8 via the generic llvm.umax.v16i8 -> pmaxub/vpmaxub.
2828 define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
2829 ; SSE-LABEL: test_mm_max_epu8:
2831 ; SSE-NEXT: pmaxub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xde,0xc1]
2832 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2834 ; AVX1-LABEL: test_mm_max_epu8:
2836 ; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xde,0xc1]
2837 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2839 ; AVX512-LABEL: test_mm_max_epu8:
2841 ; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
2842 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2843 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
2844 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
2845 %sel = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
2846 %bc = bitcast <16 x i8> %sel to <2 x i64>
2849 declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
; _mm_max_pd -> llvm.x86.sse2.max.pd -> maxpd/vmaxpd.
2851 define <2 x double> @test_mm_max_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
2852 ; SSE-LABEL: test_mm_max_pd:
2854 ; SSE-NEXT: maxpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5f,0xc1]
2855 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2857 ; AVX1-LABEL: test_mm_max_pd:
2859 ; AVX1-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5f,0xc1]
2860 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2862 ; AVX512-LABEL: test_mm_max_pd:
2864 ; AVX512-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
2865 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2866 %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
2867 ret <2 x double> %res
2869 declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
; _mm_max_sd -> llvm.x86.sse2.max.sd -> maxsd/vmaxsd (scalar low lane).
2871 define <2 x double> @test_mm_max_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
2872 ; SSE-LABEL: test_mm_max_sd:
2874 ; SSE-NEXT: maxsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5f,0xc1]
2875 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2877 ; AVX1-LABEL: test_mm_max_sd:
2879 ; AVX1-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5f,0xc1]
2880 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2882 ; AVX512-LABEL: test_mm_max_sd:
2884 ; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
2885 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2886 %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
2887 ret <2 x double> %res
2889 declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
; _mm_mfence -> llvm.x86.sse2.mfence -> mfence; identical on all targets.
2891 define void @test_mm_mfence() nounwind {
2892 ; CHECK-LABEL: test_mm_mfence:
2894 ; CHECK-NEXT: mfence # encoding: [0x0f,0xae,0xf0]
2895 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2896 call void @llvm.x86.sse2.mfence()
2899 declare void @llvm.x86.sse2.mfence() nounwind readnone
; _mm_min_epi16 via the generic llvm.smin.v8i16 -> pminsw/vpminsw.
2901 define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
2902 ; SSE-LABEL: test_mm_min_epi16:
2904 ; SSE-NEXT: pminsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xea,0xc1]
2905 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2907 ; AVX1-LABEL: test_mm_min_epi16:
2909 ; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xea,0xc1]
2910 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2912 ; AVX512-LABEL: test_mm_min_epi16:
2914 ; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
2915 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2916 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
2917 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
2918 %sel = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
2919 %bc = bitcast <8 x i16> %sel to <2 x i64>
2922 declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
; _mm_min_epu8 via the generic llvm.umin.v16i8 -> pminub/vpminub.
2924 define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
2925 ; SSE-LABEL: test_mm_min_epu8:
2927 ; SSE-NEXT: pminub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xda,0xc1]
2928 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2930 ; AVX1-LABEL: test_mm_min_epu8:
2932 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xda,0xc1]
2933 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2935 ; AVX512-LABEL: test_mm_min_epu8:
2937 ; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
2938 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2939 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
2940 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
2941 %sel = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
2942 %bc = bitcast <16 x i8> %sel to <2 x i64>
2945 declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
; _mm_min_pd -> llvm.x86.sse2.min.pd -> minpd/vminpd.
2947 define <2 x double> @test_mm_min_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
2948 ; SSE-LABEL: test_mm_min_pd:
2950 ; SSE-NEXT: minpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5d,0xc1]
2951 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2953 ; AVX1-LABEL: test_mm_min_pd:
2955 ; AVX1-NEXT: vminpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5d,0xc1]
2956 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2958 ; AVX512-LABEL: test_mm_min_pd:
2960 ; AVX512-NEXT: vminpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
2961 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2962 %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
2963 ret <2 x double> %res
2965 declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
; _mm_min_sd -> llvm.x86.sse2.min.sd -> minsd/vminsd (scalar low lane).
2967 define <2 x double> @test_mm_min_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
2968 ; SSE-LABEL: test_mm_min_sd:
2970 ; SSE-NEXT: minsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5d,0xc1]
2971 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2973 ; AVX1-LABEL: test_mm_min_sd:
2975 ; AVX1-NEXT: vminsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5d,0xc1]
2976 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2978 ; AVX512-LABEL: test_mm_min_sd:
2980 ; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
2981 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2982 %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
2983 ret <2 x double> %res
2985 declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
; _mm_move_epi64: keep lane 0, zero lane 1 -> movq/vmovq (xmm0[0],zero).
2987 define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
2988 ; SSE-LABEL: test_mm_move_epi64:
2990 ; SSE-NEXT: movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
2991 ; SSE-NEXT: # xmm0 = xmm0[0],zero
2992 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
2994 ; AVX1-LABEL: test_mm_move_epi64:
2996 ; AVX1-NEXT: vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
2997 ; AVX1-NEXT: # xmm0 = xmm0[0],zero
2998 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3000 ; AVX512-LABEL: test_mm_move_epi64:
3002 ; AVX512-NEXT: vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
3003 ; AVX512-NEXT: # xmm0 = xmm0[0],zero
3004 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3005 %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
; _mm_move_sd: low lane from %a1, high lane from %a0. SSE selects movsd;
; AVX prefers vblendps $3 (takes xmm1[0,1] i.e. the low double).
3009 define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
3010 ; SSE-LABEL: test_mm_move_sd:
3012 ; SSE-NEXT: movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
3013 ; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
3014 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3016 ; AVX-LABEL: test_mm_move_sd:
3018 ; AVX-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
3019 ; AVX-NEXT: # xmm0 = xmm1[0,1],xmm0[2,3]
3020 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3021 %ext0 = extractelement <2 x double> %a1, i32 0
3022 %res0 = insertelement <2 x double> undef, double %ext0, i32 0
3023 %ext1 = extractelement <2 x double> %a0, i32 1
3024 %res1 = insertelement <2 x double> %res0, double %ext1, i32 1
3025 ret <2 x double> %res1
; _mm_movemask_epi8 -> llvm.x86.sse2.pmovmskb.128 -> pmovmskb/vpmovmskb.
3028 define i32 @test_mm_movemask_epi8(<2 x i64> %a0) nounwind {
3029 ; SSE-LABEL: test_mm_movemask_epi8:
3031 ; SSE-NEXT: pmovmskb %xmm0, %eax # encoding: [0x66,0x0f,0xd7,0xc0]
3032 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3034 ; AVX-LABEL: test_mm_movemask_epi8:
3036 ; AVX-NEXT: vpmovmskb %xmm0, %eax # encoding: [0xc5,0xf9,0xd7,0xc0]
3037 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3038 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
3039 %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %arg0)
3042 declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
; _mm_movemask_pd -> llvm.x86.sse2.movmsk.pd -> movmskpd/vmovmskpd.
3044 define i32 @test_mm_movemask_pd(<2 x double> %a0) nounwind {
3045 ; SSE-LABEL: test_mm_movemask_pd:
3047 ; SSE-NEXT: movmskpd %xmm0, %eax # encoding: [0x66,0x0f,0x50,0xc0]
3048 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3050 ; AVX-LABEL: test_mm_movemask_pd:
3052 ; AVX-NEXT: vmovmskpd %xmm0, %eax # encoding: [0xc5,0xf9,0x50,0xc0]
3053 ; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3054 %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
3057 declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
; _mm_mul_epu32: (and lo32) * (and lo32). SSE/AVX1 match pmuludq directly;
; AVX512DQ instead materializes the masks with vpxor+vpblendd (zeroing the
; odd i32 lanes) and multiplies with EVEX vpmullq.
3059 define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
3060 ; SSE-LABEL: test_mm_mul_epu32:
3062 ; SSE-NEXT: pmuludq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf4,0xc1]
3063 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3065 ; AVX1-LABEL: test_mm_mul_epu32:
3067 ; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf4,0xc1]
3068 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3070 ; AVX512-LABEL: test_mm_mul_epu32:
3072 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
3073 ; AVX512-NEXT: vpblendd $10, %xmm2, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x02,0xc2,0x0a]
3074 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
3075 ; AVX512-NEXT: vpblendd $10, %xmm2, %xmm1, %xmm1 # encoding: [0xc4,0xe3,0x71,0x02,0xca,0x0a]
3076 ; AVX512-NEXT: # xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
3077 ; AVX512-NEXT: vpmullq %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x40,0xc1]
3078 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3079 %A = and <2 x i64> %a0, <i64 4294967295, i64 4294967295>
3080 %B = and <2 x i64> %a1, <i64 4294967295, i64 4294967295>
3081 %res = mul nuw <2 x i64> %A, %B
; _mm_mul_pd: plain IR fmul <2 x double> -> mulpd/vmulpd.
3085 define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
3086 ; SSE-LABEL: test_mm_mul_pd:
3088 ; SSE-NEXT: mulpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x59,0xc1]
3089 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3091 ; AVX1-LABEL: test_mm_mul_pd:
3093 ; AVX1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x59,0xc1]
3094 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3096 ; AVX512-LABEL: test_mm_mul_pd:
3098 ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x59,0xc1]
3099 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3100 %res = fmul <2 x double> %a0, %a1
3101 ret <2 x double> %res
; _mm_mul_sd: scalar extract/fmul/insert pattern must fold to mulsd/vmulsd.
3104 define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
3105 ; SSE-LABEL: test_mm_mul_sd:
3107 ; SSE-NEXT: mulsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x59,0xc1]
3108 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3110 ; AVX1-LABEL: test_mm_mul_sd:
3112 ; AVX1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x59,0xc1]
3113 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3115 ; AVX512-LABEL: test_mm_mul_sd:
3117 ; AVX512-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x59,0xc1]
3118 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3119 %ext0 = extractelement <2 x double> %a0, i32 0
3120 %ext1 = extractelement <2 x double> %a1, i32 0
3121 %fmul = fmul double %ext0, %ext1
3122 %res = insertelement <2 x double> %a0, double %fmul, i32 0
3123 ret <2 x double> %res
; _mm_mulhi_epi16 -> llvm.x86.sse2.pmulh.w -> pmulhw/vpmulhw.
3126 define <2 x i64> @test_mm_mulhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
3127 ; SSE-LABEL: test_mm_mulhi_epi16:
3129 ; SSE-NEXT: pmulhw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe5,0xc1]
3130 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3132 ; AVX1-LABEL: test_mm_mulhi_epi16:
3134 ; AVX1-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe5,0xc1]
3135 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3137 ; AVX512-LABEL: test_mm_mulhi_epi16:
3139 ; AVX512-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
3140 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3141 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
3142 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
3143 %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %arg0, <8 x i16> %arg1)
3144 %bc = bitcast <8 x i16> %res to <2 x i64>
3147 declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
; _mm_mulhi_epu16 lowering test: unsigned 16-bit high-half multiply via the
; llvm.x86.sse2.pmulhu.w intrinsic, expected to select (v)pmulhuw.
; The truncated terminator (ret of %bc) and closing brace are restored.
3149 define <2 x i64> @test_mm_mulhi_epu16(<2 x i64> %a0, <2 x i64> %a1) {
3150 ; SSE-LABEL: test_mm_mulhi_epu16:
3152 ; SSE-NEXT: pmulhuw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe4,0xc1]
3153 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3155 ; AVX1-LABEL: test_mm_mulhi_epu16:
3157 ; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe4,0xc1]
3158 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3160 ; AVX512-LABEL: test_mm_mulhi_epu16:
3162 ; AVX512-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
3163 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3164 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
3165 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
3166 %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %arg0, <8 x i16> %arg1)
3167 %bc = bitcast <8 x i16> %res to <2 x i64>
3168 ret <2 x i64> %bc
3169 }
3170 declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
; _mm_mullo_epi16 lowering test: plain IR `mul` on <8 x i16> (low half of the
; product), expected to select (v)pmullw.
; The truncated terminator (ret of %bc) and closing brace are restored.
3172 define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
3173 ; SSE-LABEL: test_mm_mullo_epi16:
3175 ; SSE-NEXT: pmullw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd5,0xc1]
3176 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3178 ; AVX1-LABEL: test_mm_mullo_epi16:
3180 ; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd5,0xc1]
3181 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3183 ; AVX512-LABEL: test_mm_mullo_epi16:
3185 ; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd5,0xc1]
3186 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3187 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
3188 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
3189 %res = mul <8 x i16> %arg0, %arg1
3190 %bc = bitcast <8 x i16> %res to <2 x i64>
3191 ret <2 x i64> %bc
3192 }
; _mm_or_pd lowering test: bitwise OR done through <4 x i32> bitcasts; the
; integer-domain `or` is still expected to select the float-domain (v)orps.
; The function's closing brace was truncated in this excerpt and is restored.
3194 define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
3195 ; SSE-LABEL: test_mm_or_pd:
3197 ; SSE-NEXT: orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
3198 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3200 ; AVX1-LABEL: test_mm_or_pd:
3202 ; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
3203 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3205 ; AVX512-LABEL: test_mm_or_pd:
3207 ; AVX512-NEXT: vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
3208 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3209 %arg0 = bitcast <2 x double> %a0 to <4 x i32>
3210 %arg1 = bitcast <2 x double> %a1 to <4 x i32>
3211 %res = or <4 x i32> %arg0, %arg1
3212 %bc = bitcast <4 x i32> %res to <2 x double>
3213 ret <2 x double> %bc
3214 }
; _mm_or_si128 lowering test: `or` on <2 x i64>, expected to select (v)orps.
; The truncated terminator (ret of %res) and closing brace are restored.
3216 define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
3217 ; SSE-LABEL: test_mm_or_si128:
3219 ; SSE-NEXT: orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
3220 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3222 ; AVX1-LABEL: test_mm_or_si128:
3224 ; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
3225 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3227 ; AVX512-LABEL: test_mm_or_si128:
3229 ; AVX512-NEXT: vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
3230 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3231 %res = or <2 x i64> %a0, %a1
3232 ret <2 x i64> %res
3233 }
; _mm_packs_epi16 lowering test: signed-saturating 16->8 pack via the
; llvm.x86.sse2.packsswb.128 intrinsic, expected to select (v)packsswb.
; The truncated terminator (ret of %bc) and closing brace are restored.
3235 define <2 x i64> @test_mm_packs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
3236 ; SSE-LABEL: test_mm_packs_epi16:
3238 ; SSE-NEXT: packsswb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x63,0xc1]
3239 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3241 ; AVX1-LABEL: test_mm_packs_epi16:
3243 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x63,0xc1]
3244 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3246 ; AVX512-LABEL: test_mm_packs_epi16:
3248 ; AVX512-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
3249 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3250 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
3251 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
3252 %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
3253 %bc = bitcast <16 x i8> %res to <2 x i64>
3254 ret <2 x i64> %bc
3255 }
3256 declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
; _mm_packs_epi32 lowering test: signed-saturating 32->16 pack via the
; llvm.x86.sse2.packssdw.128 intrinsic, expected to select (v)packssdw.
; The truncated terminator (ret of %bc) and closing brace are restored.
3258 define <2 x i64> @test_mm_packs_epi32(<2 x i64> %a0, <2 x i64> %a1) {
3259 ; SSE-LABEL: test_mm_packs_epi32:
3261 ; SSE-NEXT: packssdw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6b,0xc1]
3262 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3264 ; AVX1-LABEL: test_mm_packs_epi32:
3266 ; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x6b,0xc1]
3267 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3269 ; AVX512-LABEL: test_mm_packs_epi32:
3271 ; AVX512-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
3272 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3273 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
3274 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
3275 %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %arg0, <4 x i32> %arg1)
3276 %bc = bitcast <8 x i16> %res to <2 x i64>
3277 ret <2 x i64> %bc
3278 }
3279 declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
; _mm_packus_epi16 lowering test: unsigned-saturating 16->8 pack via the
; llvm.x86.sse2.packuswb.128 intrinsic, expected to select (v)packuswb.
; The truncated terminator (ret of %bc) and closing brace are restored.
3281 define <2 x i64> @test_mm_packus_epi16(<2 x i64> %a0, <2 x i64> %a1) {
3282 ; SSE-LABEL: test_mm_packus_epi16:
3284 ; SSE-NEXT: packuswb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x67,0xc1]
3285 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3287 ; AVX1-LABEL: test_mm_packus_epi16:
3289 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x67,0xc1]
3290 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3292 ; AVX512-LABEL: test_mm_packus_epi16:
3294 ; AVX512-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
3295 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3296 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
3297 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
3298 %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
3299 %bc = bitcast <16 x i8> %res to <2 x i64>
3300 ret <2 x i64> %bc
3301 }
3302 declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
; _mm_pause lowering test: the llvm.x86.sse2.pause intrinsic, expected to
; select the `pause` instruction on every configuration.
; The truncated `ret void` and closing brace are restored.
3304 define void @test_mm_pause() nounwind {
3305 ; CHECK-LABEL: test_mm_pause:
3307 ; CHECK-NEXT: pause # encoding: [0xf3,0x90]
3308 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3309 call void @llvm.x86.sse2.pause()
3310 ret void
3311 }
3312 declare void @llvm.x86.sse2.pause() nounwind readnone
; _mm_sad_epu8 lowering test: sum of absolute differences of unsigned bytes
; via the llvm.x86.sse2.psad.bw intrinsic, expected to select (v)psadbw.
; The truncated terminator (ret of %res) and closing brace are restored.
3314 define <2 x i64> @test_mm_sad_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
3315 ; SSE-LABEL: test_mm_sad_epu8:
3317 ; SSE-NEXT: psadbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf6,0xc1]
3318 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3320 ; AVX1-LABEL: test_mm_sad_epu8:
3322 ; AVX1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf6,0xc1]
3323 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3325 ; AVX512-LABEL: test_mm_sad_epu8:
3327 ; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
3328 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
3329 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
3330 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
3331 %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %arg0, <16 x i8> %arg1)
3332 ret <2 x i64> %res
3333 }
3334 declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
3336 define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
3337 ; X86-SSE-LABEL: test_mm_set_epi8:
3339 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
3340 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3341 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3342 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3343 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3344 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3345 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
3346 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3347 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3348 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3349 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3350 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3351 ; X86-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
3352 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
3353 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
3354 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3355 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3356 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3357 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3358 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3359 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
3360 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3361 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3362 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3363 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3364 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3365 ; X86-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
3366 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
3367 ; X86-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
3368 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
3369 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
3370 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3371 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3372 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3373 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3374 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3375 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
3376 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3377 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3378 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3379 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3380 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3381 ; X86-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
3382 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
3383 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
3384 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3385 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3386 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3387 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3388 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3389 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
3390 ; X86-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
3391 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
3392 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3393 ; X86-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
3394 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
3395 ; X86-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
3396 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3397 ; X86-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
3398 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3399 ; X86-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
3400 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
3401 ; X86-SSE-NEXT: retl # encoding: [0xc3]
3403 ; X86-AVX1-LABEL: test_mm_set_epi8:
3404 ; X86-AVX1: # %bb.0:
3405 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
3406 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x40]
3407 ; X86-AVX1-NEXT: vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
3408 ; X86-AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
3409 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3410 ; X86-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3411 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
3412 ; X86-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3413 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3414 ; X86-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3415 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
3416 ; X86-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3417 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3418 ; X86-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3419 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
3420 ; X86-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3421 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3422 ; X86-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3423 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
3424 ; X86-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3425 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3426 ; X86-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3427 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
3428 ; X86-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3429 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3430 ; X86-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3431 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
3432 ; X86-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3433 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3434 ; X86-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3435 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
3436 ; X86-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3437 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
3439 ; X86-AVX512-LABEL: test_mm_set_epi8:
3440 ; X86-AVX512: # %bb.0:
3441 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
3442 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x40]
3443 ; X86-AVX512-NEXT: vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
3444 ; X86-AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
3445 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3446 ; X86-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3447 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
3448 ; X86-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3449 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3450 ; X86-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3451 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
3452 ; X86-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3453 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3454 ; X86-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3455 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
3456 ; X86-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3457 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3458 ; X86-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3459 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
3460 ; X86-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3461 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3462 ; X86-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3463 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
3464 ; X86-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3465 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3466 ; X86-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3467 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
3468 ; X86-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3469 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3470 ; X86-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3471 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
3472 ; X86-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3473 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
3475 ; X64-SSE-LABEL: test_mm_set_epi8:
3477 ; X64-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3478 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3479 ; X64-SSE-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3480 ; X64-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3481 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3482 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3483 ; X64-SSE-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3484 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3485 ; X64-SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3486 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3487 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3488 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3489 ; X64-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
3490 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
3491 ; X64-SSE-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3492 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3493 ; X64-SSE-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3494 ; X64-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3495 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3496 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3497 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3498 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3499 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3500 ; X64-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3501 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3502 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3503 ; X64-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
3504 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
3505 ; X64-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
3506 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
3507 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3508 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3509 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3510 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3511 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3512 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3513 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3514 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3515 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3516 ; X64-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3517 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3518 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3519 ; X64-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
3520 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
3521 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3522 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3523 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
3524 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3525 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3526 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3527 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
3528 ; X64-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
3529 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
3530 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3531 ; X64-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
3532 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
3533 ; X64-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
3534 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3535 ; X64-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
3536 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3537 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
3538 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
3539 ; X64-SSE-NEXT: retq # encoding: [0xc3]
3541 ; X64-AVX1-LABEL: test_mm_set_epi8:
3542 ; X64-AVX1: # %bb.0:
3543 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb6,0x54,0x24,0x48]
3544 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
3545 ; X64-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
3546 ; X64-AVX1-NEXT: vpinsrb $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc2,0x01]
3547 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
3548 ; X64-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3549 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3550 ; X64-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3551 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3552 ; X64-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3553 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3554 ; X64-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3555 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3556 ; X64-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3557 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3558 ; X64-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3559 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3560 ; X64-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3561 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3562 ; X64-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3563 ; X64-AVX1-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3564 ; X64-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3565 ; X64-AVX1-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3566 ; X64-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3567 ; X64-AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3568 ; X64-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3569 ; X64-AVX1-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3570 ; X64-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3571 ; X64-AVX1-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3572 ; X64-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3573 ; X64-AVX1-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3574 ; X64-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3575 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
3577 ; X64-AVX512-LABEL: test_mm_set_epi8:
3578 ; X64-AVX512: # %bb.0:
3579 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb6,0x54,0x24,0x48]
3580 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
3581 ; X64-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
3582 ; X64-AVX512-NEXT: vpinsrb $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc2,0x01]
3583 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
3584 ; X64-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3585 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
3586 ; X64-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3587 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
3588 ; X64-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3589 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
3590 ; X64-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3591 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
3592 ; X64-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3593 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
3594 ; X64-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3595 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
3596 ; X64-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3597 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
3598 ; X64-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3599 ; X64-AVX512-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3600 ; X64-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3601 ; X64-AVX512-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3602 ; X64-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3603 ; X64-AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3604 ; X64-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3605 ; X64-AVX512-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3606 ; X64-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3607 ; X64-AVX512-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3608 ; X64-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3609 ; X64-AVX512-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3610 ; X64-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3611 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
3613 ; X32-SSE-LABEL: test_mm_set_epi8:
3615 ; X32-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3616 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3617 ; X32-SSE-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3618 ; X32-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3619 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3620 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3621 ; X32-SSE-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3622 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3623 ; X32-SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3624 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3625 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3626 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3627 ; X32-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
3628 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
3629 ; X32-SSE-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3630 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3631 ; X32-SSE-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3632 ; X32-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3633 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3634 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3635 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
3636 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3637 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
3638 ; X32-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3639 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
3640 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3641 ; X32-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
3642 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
3643 ; X32-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
3644 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
3645 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
3646 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3647 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
3648 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3649 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3650 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3651 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
3652 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3653 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
3654 ; X32-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3655 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
3656 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3657 ; X32-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
3658 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
3659 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
3660 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3661 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
3662 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3663 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
3664 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3665 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
3666 ; X32-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
3667 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
3668 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3669 ; X32-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
3670 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
3671 ; X32-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
3672 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3673 ; X32-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
3674 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3675 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
3676 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
3677 ; X32-SSE-NEXT: retq # encoding: [0xc3]
3679 ; X32-AVX1-LABEL: test_mm_set_epi8:
3680 ; X32-AVX1: # %bb.0:
3681 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb6,0x54,0x24,0x48]
3682 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
3683 ; X32-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
3684 ; X32-AVX1-NEXT: vpinsrb $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc2,0x01]
3685 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
3686 ; X32-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3687 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
3688 ; X32-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3689 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
3690 ; X32-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3691 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
3692 ; X32-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3693 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
3694 ; X32-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3695 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
3696 ; X32-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3697 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
3698 ; X32-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3699 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
3700 ; X32-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3701 ; X32-AVX1-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3702 ; X32-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3703 ; X32-AVX1-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3704 ; X32-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3705 ; X32-AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3706 ; X32-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3707 ; X32-AVX1-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3708 ; X32-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3709 ; X32-AVX1-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3710 ; X32-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3711 ; X32-AVX1-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3712 ; X32-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3713 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
3715 ; X32-AVX512-LABEL: test_mm_set_epi8:
3716 ; X32-AVX512: # %bb.0:
3717 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb6,0x54,0x24,0x48]
3718 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
3719 ; X32-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
3720 ; X32-AVX512-NEXT: vpinsrb $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc2,0x01]
3721 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
3722 ; X32-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
3723 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
3724 ; X32-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
3725 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
3726 ; X32-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
3727 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
3728 ; X32-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
3729 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
3730 ; X32-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
3731 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
3732 ; X32-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
3733 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
3734 ; X32-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
3735 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
3736 ; X32-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
3737 ; X32-AVX512-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
3738 ; X32-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
3739 ; X32-AVX512-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
3740 ; X32-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
3741 ; X32-AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
3742 ; X32-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
3743 ; X32-AVX512-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
3744 ; X32-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
3745 ; X32-AVX512-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
3746 ; X32-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
3747 ; X32-AVX512-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
3748 ; X32-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
3749 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
3750 %res0 = insertelement <16 x i8> undef, i8 %a15, i32 0
3751 %res1 = insertelement <16 x i8> %res0, i8 %a14, i32 1
3752 %res2 = insertelement <16 x i8> %res1, i8 %a13, i32 2
3753 %res3 = insertelement <16 x i8> %res2, i8 %a12, i32 3
3754 %res4 = insertelement <16 x i8> %res3, i8 %a11, i32 4
3755 %res5 = insertelement <16 x i8> %res4, i8 %a10, i32 5
3756 %res6 = insertelement <16 x i8> %res5, i8 %a9 , i32 6
3757 %res7 = insertelement <16 x i8> %res6, i8 %a8 , i32 7
3758 %res8 = insertelement <16 x i8> %res7, i8 %a7 , i32 8
3759 %res9 = insertelement <16 x i8> %res8, i8 %a6 , i32 9
3760 %res10 = insertelement <16 x i8> %res9, i8 %a5 , i32 10
3761 %res11 = insertelement <16 x i8> %res10, i8 %a4 , i32 11
3762 %res12 = insertelement <16 x i8> %res11, i8 %a3 , i32 12
3763 %res13 = insertelement <16 x i8> %res12, i8 %a2 , i32 13
3764 %res14 = insertelement <16 x i8> %res13, i8 %a1 , i32 14
3765 %res15 = insertelement <16 x i8> %res14, i8 %a0 , i32 15
3766 %res = bitcast <16 x i8> %res15 to <2 x i64>
3770 define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
3771 ; X86-SSE-LABEL: test_mm_set_epi16:
3773 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
3774 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
3775 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
3776 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
3777 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
3778 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3779 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
3780 ; X86-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
3781 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
3782 ; X86-SSE-NEXT: movd %eax, %xmm5 # encoding: [0x66,0x0f,0x6e,0xe8]
3783 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
3784 ; X86-SSE-NEXT: movd %eax, %xmm6 # encoding: [0x66,0x0f,0x6e,0xf0]
3785 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
3786 ; X86-SSE-NEXT: movd %eax, %xmm7 # encoding: [0x66,0x0f,0x6e,0xf8]
3787 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
3788 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
3789 ; X86-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
3790 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
3791 ; X86-SSE-NEXT: punpcklwd %xmm3, %xmm4 # encoding: [0x66,0x0f,0x61,0xe3]
3792 ; X86-SSE-NEXT: # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
3793 ; X86-SSE-NEXT: punpckldq %xmm2, %xmm4 # encoding: [0x66,0x0f,0x62,0xe2]
3794 ; X86-SSE-NEXT: # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
3795 ; X86-SSE-NEXT: punpcklwd %xmm5, %xmm6 # encoding: [0x66,0x0f,0x61,0xf5]
3796 ; X86-SSE-NEXT: # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
3797 ; X86-SSE-NEXT: punpcklwd %xmm7, %xmm0 # encoding: [0x66,0x0f,0x61,0xc7]
3798 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
3799 ; X86-SSE-NEXT: punpckldq %xmm6, %xmm0 # encoding: [0x66,0x0f,0x62,0xc6]
3800 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
3801 ; X86-SSE-NEXT: punpcklqdq %xmm4, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc4]
3802 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0]
3803 ; X86-SSE-NEXT: retl # encoding: [0xc3]
3805 ; X86-AVX1-LABEL: test_mm_set_epi16:
3806 ; X86-AVX1: # %bb.0:
3807 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
3808 ; X86-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
3809 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
3810 ; X86-AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
3811 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
3812 ; X86-AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
3813 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
3814 ; X86-AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
3815 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
3816 ; X86-AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
3817 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
3818 ; X86-AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
3819 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
3820 ; X86-AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
3821 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
3822 ; X86-AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
3823 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
3825 ; X86-AVX512-LABEL: test_mm_set_epi16:
3826 ; X86-AVX512: # %bb.0:
3827 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
3828 ; X86-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
3829 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
3830 ; X86-AVX512-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
3831 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
3832 ; X86-AVX512-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
3833 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
3834 ; X86-AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
3835 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
3836 ; X86-AVX512-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
3837 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
3838 ; X86-AVX512-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
3839 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
3840 ; X86-AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
3841 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
3842 ; X86-AVX512-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
3843 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
3845 ; X64-SSE-LABEL: test_mm_set_epi16:
3847 ; X64-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x10]
3848 ; X64-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
3849 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
3850 ; X64-SSE-NEXT: movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
3851 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
3852 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
3853 ; X64-SSE-NEXT: movd %edx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc2]
3854 ; X64-SSE-NEXT: movd %ecx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd1]
3855 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
3856 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
3857 ; X64-SSE-NEXT: punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
3858 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
3859 ; X64-SSE-NEXT: movd %r8d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc0]
3860 ; X64-SSE-NEXT: movd %r9d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xc9]
3861 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
3862 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
3863 ; X64-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3864 ; X64-SSE-NEXT: movd %r10d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc2]
3865 ; X64-SSE-NEXT: punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
3866 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
3867 ; X64-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
3868 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3869 ; X64-SSE-NEXT: punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
3870 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0]
3871 ; X64-SSE-NEXT: retq # encoding: [0xc3]
3873 ; X64-AVX1-LABEL: test_mm_set_epi16:
3874 ; X64-AVX1: # %bb.0:
3875 ; X64-AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
3876 ; X64-AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
3877 ; X64-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
3878 ; X64-AVX1-NEXT: vpinsrw $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
3879 ; X64-AVX1-NEXT: vpinsrw $2, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
3880 ; X64-AVX1-NEXT: vpinsrw $3, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
3881 ; X64-AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
3882 ; X64-AVX1-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
3883 ; X64-AVX1-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
3884 ; X64-AVX1-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
3885 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
3887 ; X64-AVX512-LABEL: test_mm_set_epi16:
3888 ; X64-AVX512: # %bb.0:
3889 ; X64-AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
3890 ; X64-AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
3891 ; X64-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
3892 ; X64-AVX512-NEXT: vpinsrw $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
3893 ; X64-AVX512-NEXT: vpinsrw $2, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
3894 ; X64-AVX512-NEXT: vpinsrw $3, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
3895 ; X64-AVX512-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
3896 ; X64-AVX512-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
3897 ; X64-AVX512-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
3898 ; X64-AVX512-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
3899 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
3901 ; X32-SSE-LABEL: test_mm_set_epi16:
3903 ; X32-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x10]
3904 ; X32-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x08]
3905 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
3906 ; X32-SSE-NEXT: movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
3907 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
3908 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
3909 ; X32-SSE-NEXT: movd %edx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc2]
3910 ; X32-SSE-NEXT: movd %ecx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd1]
3911 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
3912 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
3913 ; X32-SSE-NEXT: punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
3914 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
3915 ; X32-SSE-NEXT: movd %r8d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc0]
3916 ; X32-SSE-NEXT: movd %r9d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xc9]
3917 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
3918 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
3919 ; X32-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
3920 ; X32-SSE-NEXT: movd %r10d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc2]
3921 ; X32-SSE-NEXT: punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
3922 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
3923 ; X32-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
3924 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3925 ; X32-SSE-NEXT: punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
3926 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0]
3927 ; X32-SSE-NEXT: retq # encoding: [0xc3]
3929 ; X32-AVX1-LABEL: test_mm_set_epi16:
3930 ; X32-AVX1: # %bb.0:
3931 ; X32-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
3932 ; X32-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
3933 ; X32-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
3934 ; X32-AVX1-NEXT: vpinsrw $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
3935 ; X32-AVX1-NEXT: vpinsrw $2, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
3936 ; X32-AVX1-NEXT: vpinsrw $3, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
3937 ; X32-AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
3938 ; X32-AVX1-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
3939 ; X32-AVX1-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
3940 ; X32-AVX1-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
3941 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
3943 ; X32-AVX512-LABEL: test_mm_set_epi16:
3944 ; X32-AVX512: # %bb.0:
3945 ; X32-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
3946 ; X32-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
3947 ; X32-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
3948 ; X32-AVX512-NEXT: vpinsrw $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
3949 ; X32-AVX512-NEXT: vpinsrw $2, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
3950 ; X32-AVX512-NEXT: vpinsrw $3, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
3951 ; X32-AVX512-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
3952 ; X32-AVX512-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
3953 ; X32-AVX512-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
3954 ; X32-AVX512-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
3955 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
3956 %res0 = insertelement <8 x i16> undef, i16 %a7, i32 0
3957 %res1 = insertelement <8 x i16> %res0, i16 %a6, i32 1
3958 %res2 = insertelement <8 x i16> %res1, i16 %a5, i32 2
3959 %res3 = insertelement <8 x i16> %res2, i16 %a4, i32 3
3960 %res4 = insertelement <8 x i16> %res3, i16 %a3, i32 4
3961 %res5 = insertelement <8 x i16> %res4, i16 %a2, i32 5
3962 %res6 = insertelement <8 x i16> %res5, i16 %a1, i32 6
3963 %res7 = insertelement <8 x i16> %res6, i16 %a0, i32 7
3964 %res = bitcast <8 x i16> %res7 to <2 x i64>
3968 define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
3969 ; X86-SSE-LABEL: test_mm_set_epi32:
3971 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
3972 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
3973 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
3974 ; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
3975 ; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
3976 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3977 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x0c]
3978 ; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
3979 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
3980 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
3981 ; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
3982 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
3983 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
3984 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
3985 ; X86-SSE-NEXT: retl # encoding: [0xc3]
3987 ; X86-AVX1-LABEL: test_mm_set_epi32:
3988 ; X86-AVX1: # %bb.0:
3989 ; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
3990 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
3991 ; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
3992 ; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
3993 ; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
3994 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
3996 ; X86-AVX512-LABEL: test_mm_set_epi32:
3997 ; X86-AVX512: # %bb.0:
3998 ; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
3999 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
4000 ; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
4001 ; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
4002 ; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
4003 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4005 ; X64-SSE-LABEL: test_mm_set_epi32:
4007 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4008 ; X64-SSE-NEXT: movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
4009 ; X64-SSE-NEXT: punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
4010 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4011 ; X64-SSE-NEXT: movd %edx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd2]
4012 ; X64-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
4013 ; X64-SSE-NEXT: punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
4014 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4015 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4016 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4017 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4019 ; X64-AVX1-LABEL: test_mm_set_epi32:
4020 ; X64-AVX1: # %bb.0:
4021 ; X64-AVX1-NEXT: vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
4022 ; X64-AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
4023 ; X64-AVX1-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
4024 ; X64-AVX1-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
4025 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4027 ; X64-AVX512-LABEL: test_mm_set_epi32:
4028 ; X64-AVX512: # %bb.0:
4029 ; X64-AVX512-NEXT: vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
4030 ; X64-AVX512-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
4031 ; X64-AVX512-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
4032 ; X64-AVX512-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
4033 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4035 ; X32-SSE-LABEL: test_mm_set_epi32:
4037 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4038 ; X32-SSE-NEXT: movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
4039 ; X32-SSE-NEXT: punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
4040 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4041 ; X32-SSE-NEXT: movd %edx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd2]
4042 ; X32-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
4043 ; X32-SSE-NEXT: punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
4044 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4045 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4046 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4047 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4049 ; X32-AVX1-LABEL: test_mm_set_epi32:
4050 ; X32-AVX1: # %bb.0:
4051 ; X32-AVX1-NEXT: vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
4052 ; X32-AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
4053 ; X32-AVX1-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
4054 ; X32-AVX1-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
4055 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4057 ; X32-AVX512-LABEL: test_mm_set_epi32:
4058 ; X32-AVX512: # %bb.0:
4059 ; X32-AVX512-NEXT: vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
4060 ; X32-AVX512-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
4061 ; X32-AVX512-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
4062 ; X32-AVX512-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
4063 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4064 %res0 = insertelement <4 x i32> undef, i32 %a3, i32 0
4065 %res1 = insertelement <4 x i32> %res0, i32 %a2, i32 1
4066 %res2 = insertelement <4 x i32> %res1, i32 %a1, i32 2
4067 %res3 = insertelement <4 x i32> %res2, i32 %a0, i32 3
4068 %res = bitcast <4 x i32> %res3 to <2 x i64>
4072 ; TODO test_mm_set_epi64
4074 define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
4075 ; X86-SSE-LABEL: test_mm_set_epi64x:
4077 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
4078 ; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
4079 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
4080 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
4081 ; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
4082 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4083 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x0c]
4084 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
4085 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x10]
4086 ; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
4087 ; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
4088 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4089 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
4090 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4091 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4093 ; X86-AVX1-LABEL: test_mm_set_epi64x:
4094 ; X86-AVX1: # %bb.0:
4095 ; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
4096 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
4097 ; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
4098 ; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
4099 ; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
4100 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4102 ; X86-AVX512-LABEL: test_mm_set_epi64x:
4103 ; X86-AVX512: # %bb.0:
4104 ; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
4105 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
4106 ; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
4107 ; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
4108 ; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
4109 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4111 ; X64-SSE-LABEL: test_mm_set_epi64x:
4113 ; X64-SSE-NEXT: movq %rdi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xcf]
4114 ; X64-SSE-NEXT: movq %rsi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc6]
4115 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4116 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4117 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4119 ; X64-AVX1-LABEL: test_mm_set_epi64x:
4120 ; X64-AVX1: # %bb.0:
4121 ; X64-AVX1-NEXT: vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4122 ; X64-AVX1-NEXT: vmovq %rsi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
4123 ; X64-AVX1-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
4124 ; X64-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
4125 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4127 ; X64-AVX512-LABEL: test_mm_set_epi64x:
4128 ; X64-AVX512: # %bb.0:
4129 ; X64-AVX512-NEXT: vmovq %rdi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4130 ; X64-AVX512-NEXT: vmovq %rsi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
4131 ; X64-AVX512-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
4132 ; X64-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
4133 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4135 ; X32-SSE-LABEL: test_mm_set_epi64x:
4137 ; X32-SSE-NEXT: movq %rdi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xcf]
4138 ; X32-SSE-NEXT: movq %rsi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc6]
4139 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4140 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4141 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4143 ; X32-AVX1-LABEL: test_mm_set_epi64x:
4144 ; X32-AVX1: # %bb.0:
4145 ; X32-AVX1-NEXT: vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4146 ; X32-AVX1-NEXT: vmovq %rsi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
4147 ; X32-AVX1-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
4148 ; X32-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
4149 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4151 ; X32-AVX512-LABEL: test_mm_set_epi64x:
4152 ; X32-AVX512: # %bb.0:
4153 ; X32-AVX512-NEXT: vmovq %rdi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4154 ; X32-AVX512-NEXT: vmovq %rsi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
4155 ; X32-AVX512-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
4156 ; X32-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
4157 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4158 %res0 = insertelement <2 x i64> undef, i64 %a1, i32 0
4159 %res1 = insertelement <2 x i64> %res0, i64 %a0, i32 1
4163 define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
4164 ; X86-SSE-LABEL: test_mm_set_pd:
4166 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x0c]
4167 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
4168 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x04]
4169 ; X86-SSE-NEXT: # xmm1 = mem[0],zero
4170 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
4171 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4172 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4174 ; X86-AVX1-LABEL: test_mm_set_pd:
4175 ; X86-AVX1: # %bb.0:
4176 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
4177 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
4178 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
4179 ; X86-AVX1-NEXT: # xmm1 = mem[0],zero
4180 ; X86-AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
4181 ; X86-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
4182 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4184 ; X86-AVX512-LABEL: test_mm_set_pd:
4185 ; X86-AVX512: # %bb.0:
4186 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
4187 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
4188 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
4189 ; X86-AVX512-NEXT: # xmm1 = mem[0],zero
4190 ; X86-AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
4191 ; X86-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
4192 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4194 ; X64-SSE-LABEL: test_mm_set_pd:
4196 ; X64-SSE-NEXT: movlhps %xmm0, %xmm1 # encoding: [0x0f,0x16,0xc8]
4197 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0]
4198 ; X64-SSE-NEXT: movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
4199 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4201 ; X64-AVX1-LABEL: test_mm_set_pd:
4202 ; X64-AVX1: # %bb.0:
4203 ; X64-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
4204 ; X64-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
4205 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4207 ; X64-AVX512-LABEL: test_mm_set_pd:
4208 ; X64-AVX512: # %bb.0:
4209 ; X64-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
4210 ; X64-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
4211 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4213 ; X32-SSE-LABEL: test_mm_set_pd:
4215 ; X32-SSE-NEXT: movlhps %xmm0, %xmm1 # encoding: [0x0f,0x16,0xc8]
4216 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0]
4217 ; X32-SSE-NEXT: movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
4218 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4220 ; X32-AVX1-LABEL: test_mm_set_pd:
4221 ; X32-AVX1: # %bb.0:
4222 ; X32-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
4223 ; X32-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
4224 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4226 ; X32-AVX512-LABEL: test_mm_set_pd:
4227 ; X32-AVX512: # %bb.0:
4228 ; X32-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
4229 ; X32-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
4230 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4231 %res0 = insertelement <2 x double> undef, double %a1, i32 0
4232 %res1 = insertelement <2 x double> %res0, double %a0, i32 1
4233 ret <2 x double> %res1
4236 define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
4237 ; X86-SSE-LABEL: test_mm_set_pd1:
4239 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
4240 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
4241 ; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4242 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
4243 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4245 ; X86-AVX1-LABEL: test_mm_set_pd1:
4246 ; X86-AVX1: # %bb.0:
4247 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
4248 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
4249 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4250 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
4251 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4253 ; X86-AVX512-LABEL: test_mm_set_pd1:
4254 ; X86-AVX512: # %bb.0:
4255 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
4256 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
4257 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4258 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
4259 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4261 ; X64-SSE-LABEL: test_mm_set_pd1:
4263 ; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4264 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
4265 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4267 ; X64-AVX1-LABEL: test_mm_set_pd1:
4268 ; X64-AVX1: # %bb.0:
4269 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4270 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
4271 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4273 ; X64-AVX512-LABEL: test_mm_set_pd1:
4274 ; X64-AVX512: # %bb.0:
4275 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4276 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
4277 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4279 ; X32-SSE-LABEL: test_mm_set_pd1:
4281 ; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4282 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
4283 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4285 ; X32-AVX1-LABEL: test_mm_set_pd1:
4286 ; X32-AVX1: # %bb.0:
4287 ; X32-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4288 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0]
4289 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4291 ; X32-AVX512-LABEL: test_mm_set_pd1:
4292 ; X32-AVX512: # %bb.0:
4293 ; X32-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4294 ; X32-AVX512-NEXT: # xmm0 = xmm0[0,0]
4295 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4296 %res0 = insertelement <2 x double> undef, double %a0, i32 0
4297 %res1 = insertelement <2 x double> %res0, double %a0, i32 1
4298 ret <2 x double> %res1
4301 define <2 x double> @test_mm_set_sd(double %a0) nounwind {
4302 ; X86-SSE-LABEL: test_mm_set_sd:
4304 ; X86-SSE-NEXT: movq {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x7e,0x44,0x24,0x04]
4305 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
4306 ; X86-SSE-NEXT: movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
4307 ; X86-SSE-NEXT: # xmm0 = xmm0[0],zero
4308 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4310 ; X86-AVX1-LABEL: test_mm_set_sd:
4311 ; X86-AVX1: # %bb.0:
4312 ; X86-AVX1-NEXT: vmovq {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
4313 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
4314 ; X86-AVX1-NEXT: vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
4315 ; X86-AVX1-NEXT: # xmm0 = xmm0[0],zero
4316 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4318 ; X86-AVX512-LABEL: test_mm_set_sd:
4319 ; X86-AVX512: # %bb.0:
4320 ; X86-AVX512-NEXT: vmovq {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
4321 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
4322 ; X86-AVX512-NEXT: vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
4323 ; X86-AVX512-NEXT: # xmm0 = xmm0[0],zero
4324 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4326 ; X64-SSE-LABEL: test_mm_set_sd:
4328 ; X64-SSE-NEXT: movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
4329 ; X64-SSE-NEXT: # xmm0 = xmm0[0],zero
4330 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4332 ; X64-AVX1-LABEL: test_mm_set_sd:
4333 ; X64-AVX1: # %bb.0:
4334 ; X64-AVX1-NEXT: vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
4335 ; X64-AVX1-NEXT: # xmm0 = xmm0[0],zero
4336 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4338 ; X64-AVX512-LABEL: test_mm_set_sd:
4339 ; X64-AVX512: # %bb.0:
4340 ; X64-AVX512-NEXT: vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
4341 ; X64-AVX512-NEXT: # xmm0 = xmm0[0],zero
4342 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4344 ; X32-SSE-LABEL: test_mm_set_sd:
4346 ; X32-SSE-NEXT: movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
4347 ; X32-SSE-NEXT: # xmm0 = xmm0[0],zero
4348 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4350 ; X32-AVX1-LABEL: test_mm_set_sd:
4351 ; X32-AVX1: # %bb.0:
4352 ; X32-AVX1-NEXT: vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
4353 ; X32-AVX1-NEXT: # xmm0 = xmm0[0],zero
4354 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4356 ; X32-AVX512-LABEL: test_mm_set_sd:
4357 ; X32-AVX512: # %bb.0:
4358 ; X32-AVX512-NEXT: vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
4359 ; X32-AVX512-NEXT: # xmm0 = xmm0[0],zero
4360 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4361 %res0 = insertelement <2 x double> undef, double %a0, i32 0
4362 %res1 = insertelement <2 x double> %res0, double 0.0, i32 1
4363 ret <2 x double> %res1
4366 define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
4367 ; X86-SSE-LABEL: test_mm_set1_epi8:
4369 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
4370 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4371 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
4372 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
4373 ; X86-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4374 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4375 ; X86-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4376 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4377 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4379 ; X86-AVX1-LABEL: test_mm_set1_epi8:
4380 ; X86-AVX1: # %bb.0:
4381 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
4382 ; X86-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
4383 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
4384 ; X86-AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
4385 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4387 ; X86-AVX512-LABEL: test_mm_set1_epi8:
4388 ; X86-AVX512: # %bb.0:
4389 ; X86-AVX512-NEXT: movb {{[0-9]+}}(%esp), %al # encoding: [0x8a,0x44,0x24,0x04]
4390 ; X86-AVX512-NEXT: vpbroadcastb %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc0]
4391 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4393 ; X64-SSE-LABEL: test_mm_set1_epi8:
4395 ; X64-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
4396 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4397 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
4398 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
4399 ; X64-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4400 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4401 ; X64-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4402 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4403 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4405 ; X64-AVX1-LABEL: test_mm_set1_epi8:
4406 ; X64-AVX1: # %bb.0:
4407 ; X64-AVX1-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
4408 ; X64-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
4409 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
4410 ; X64-AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
4411 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4413 ; X64-AVX512-LABEL: test_mm_set1_epi8:
4414 ; X64-AVX512: # %bb.0:
4415 ; X64-AVX512-NEXT: vpbroadcastb %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc7]
4416 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4418 ; X32-SSE-LABEL: test_mm_set1_epi8:
4420 ; X32-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
4421 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4422 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
4423 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
4424 ; X32-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4425 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4426 ; X32-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4427 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4428 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4430 ; X32-AVX1-LABEL: test_mm_set1_epi8:
4431 ; X32-AVX1: # %bb.0:
4432 ; X32-AVX1-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
4433 ; X32-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
4434 ; X32-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
4435 ; X32-AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
4436 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4438 ; X32-AVX512-LABEL: test_mm_set1_epi8:
4439 ; X32-AVX512: # %bb.0:
4440 ; X32-AVX512-NEXT: vpbroadcastb %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc7]
4441 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4442 %res0 = insertelement <16 x i8> undef, i8 %a0, i32 0
4443 %res1 = insertelement <16 x i8> %res0, i8 %a0, i32 1
4444 %res2 = insertelement <16 x i8> %res1, i8 %a0, i32 2
4445 %res3 = insertelement <16 x i8> %res2, i8 %a0, i32 3
4446 %res4 = insertelement <16 x i8> %res3, i8 %a0, i32 4
4447 %res5 = insertelement <16 x i8> %res4, i8 %a0, i32 5
4448 %res6 = insertelement <16 x i8> %res5, i8 %a0, i32 6
4449 %res7 = insertelement <16 x i8> %res6, i8 %a0, i32 7
4450 %res8 = insertelement <16 x i8> %res7, i8 %a0, i32 8
4451 %res9 = insertelement <16 x i8> %res8, i8 %a0, i32 9
4452 %res10 = insertelement <16 x i8> %res9, i8 %a0, i32 10
4453 %res11 = insertelement <16 x i8> %res10, i8 %a0, i32 11
4454 %res12 = insertelement <16 x i8> %res11, i8 %a0, i32 12
4455 %res13 = insertelement <16 x i8> %res12, i8 %a0, i32 13
4456 %res14 = insertelement <16 x i8> %res13, i8 %a0, i32 14
4457 %res15 = insertelement <16 x i8> %res14, i8 %a0, i32 15
4458 %res = bitcast <16 x i8> %res15 to <2 x i64>
4462 define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
4463 ; X86-SSE-LABEL: test_mm_set1_epi16:
4465 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
4466 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4467 ; X86-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4468 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4469 ; X86-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4470 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4471 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4473 ; X86-AVX1-LABEL: test_mm_set1_epi16:
4474 ; X86-AVX1: # %bb.0:
4475 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
4476 ; X86-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
4477 ; X86-AVX1-NEXT: vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
4478 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4479 ; X86-AVX1-NEXT: vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
4480 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4481 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4483 ; X86-AVX512-LABEL: test_mm_set1_epi16:
4484 ; X86-AVX512: # %bb.0:
4485 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
4486 ; X86-AVX512-NEXT: vpbroadcastw %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc0]
4487 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4489 ; X64-SSE-LABEL: test_mm_set1_epi16:
4491 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4492 ; X64-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4493 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4494 ; X64-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4495 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4496 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4498 ; X64-AVX1-LABEL: test_mm_set1_epi16:
4499 ; X64-AVX1: # %bb.0:
4500 ; X64-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
4501 ; X64-AVX1-NEXT: vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
4502 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4503 ; X64-AVX1-NEXT: vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
4504 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4505 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4507 ; X64-AVX512-LABEL: test_mm_set1_epi16:
4508 ; X64-AVX512: # %bb.0:
4509 ; X64-AVX512-NEXT: vpbroadcastw %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc7]
4510 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4512 ; X32-SSE-LABEL: test_mm_set1_epi16:
4514 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4515 ; X32-SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
4516 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4517 ; X32-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4518 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4519 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4521 ; X32-AVX1-LABEL: test_mm_set1_epi16:
4522 ; X32-AVX1: # %bb.0:
4523 ; X32-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
4524 ; X32-AVX1-NEXT: vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
4525 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
4526 ; X32-AVX1-NEXT: vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
4527 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4528 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4530 ; X32-AVX512-LABEL: test_mm_set1_epi16:
4531 ; X32-AVX512: # %bb.0:
4532 ; X32-AVX512-NEXT: vpbroadcastw %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc7]
4533 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4534 %res0 = insertelement <8 x i16> undef, i16 %a0, i32 0
4535 %res1 = insertelement <8 x i16> %res0, i16 %a0, i32 1
4536 %res2 = insertelement <8 x i16> %res1, i16 %a0, i32 2
4537 %res3 = insertelement <8 x i16> %res2, i16 %a0, i32 3
4538 %res4 = insertelement <8 x i16> %res3, i16 %a0, i32 4
4539 %res5 = insertelement <8 x i16> %res4, i16 %a0, i32 5
4540 %res6 = insertelement <8 x i16> %res5, i16 %a0, i32 6
4541 %res7 = insertelement <8 x i16> %res6, i16 %a0, i32 7
4542 %res = bitcast <8 x i16> %res7 to <2 x i64>
4546 define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
4547 ; X86-SSE-LABEL: test_mm_set1_epi32:
4549 ; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm0 # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
4550 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
4551 ; X86-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4552 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4553 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4555 ; X86-AVX1-LABEL: test_mm_set1_epi32:
4556 ; X86-AVX1: # %bb.0:
4557 ; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
4558 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
4559 ; X86-AVX1-NEXT: vpermilps $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x00]
4560 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4561 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4563 ; X86-AVX512-LABEL: test_mm_set1_epi32:
4564 ; X86-AVX512: # %bb.0:
4565 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
4566 ; X86-AVX512-NEXT: vpbroadcastd %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc0]
4567 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4569 ; X64-SSE-LABEL: test_mm_set1_epi32:
4571 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4572 ; X64-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4573 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4574 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4576 ; X64-AVX1-LABEL: test_mm_set1_epi32:
4577 ; X64-AVX1: # %bb.0:
4578 ; X64-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
4579 ; X64-AVX1-NEXT: vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
4580 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4581 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4583 ; X64-AVX512-LABEL: test_mm_set1_epi32:
4584 ; X64-AVX512: # %bb.0:
4585 ; X64-AVX512-NEXT: vpbroadcastd %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc7]
4586 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4588 ; X32-SSE-LABEL: test_mm_set1_epi32:
4590 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
4591 ; X32-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
4592 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
4593 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4595 ; X32-AVX1-LABEL: test_mm_set1_epi32:
4596 ; X32-AVX1: # %bb.0:
4597 ; X32-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
4598 ; X32-AVX1-NEXT: vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
4599 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
4600 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4602 ; X32-AVX512-LABEL: test_mm_set1_epi32:
4603 ; X32-AVX512: # %bb.0:
4604 ; X32-AVX512-NEXT: vpbroadcastd %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc7]
4605 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4606 %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
4607 %res1 = insertelement <4 x i32> %res0, i32 %a0, i32 1
4608 %res2 = insertelement <4 x i32> %res1, i32 %a0, i32 2
4609 %res3 = insertelement <4 x i32> %res2, i32 %a0, i32 3
4610 %res = bitcast <4 x i32> %res3 to <2 x i64>
4614 ; TODO test_mm_set1_epi64
4616 define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
4617 ; X86-SSE-LABEL: test_mm_set1_epi64x:
4619 ; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm0 # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
4620 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
4621 ; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm1 # encoding: [0x66,0x0f,0x6e,0x4c,0x24,0x08]
4622 ; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
4623 ; X86-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
4624 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4625 ; X86-SSE-NEXT: pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
4626 ; X86-SSE-NEXT: # xmm0 = xmm0[0,1,0,1]
4627 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4629 ; X86-AVX1-LABEL: test_mm_set1_epi64x:
4630 ; X86-AVX1: # %bb.0:
4631 ; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
4632 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
4633 ; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
4634 ; X86-AVX1-NEXT: vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
4635 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,1,0,1]
4636 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4638 ; X86-AVX512-LABEL: test_mm_set1_epi64x:
4639 ; X86-AVX512: # %bb.0:
4640 ; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
4641 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
4642 ; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
4643 ; X86-AVX512-NEXT: vpbroadcastq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
4644 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4646 ; X64-SSE-LABEL: test_mm_set1_epi64x:
4648 ; X64-SSE-NEXT: movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
4649 ; X64-SSE-NEXT: pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
4650 ; X64-SSE-NEXT: # xmm0 = xmm0[0,1,0,1]
4651 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4653 ; X64-AVX1-LABEL: test_mm_set1_epi64x:
4654 ; X64-AVX1: # %bb.0:
4655 ; X64-AVX1-NEXT: vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4656 ; X64-AVX1-NEXT: vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
4657 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,1,0,1]
4658 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4660 ; X64-AVX512-LABEL: test_mm_set1_epi64x:
4661 ; X64-AVX512: # %bb.0:
4662 ; X64-AVX512-NEXT: vpbroadcastq %rdi, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xc7]
4663 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4665 ; X32-SSE-LABEL: test_mm_set1_epi64x:
4667 ; X32-SSE-NEXT: movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
4668 ; X32-SSE-NEXT: pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
4669 ; X32-SSE-NEXT: # xmm0 = xmm0[0,1,0,1]
4670 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4672 ; X32-AVX1-LABEL: test_mm_set1_epi64x:
4673 ; X32-AVX1: # %bb.0:
4674 ; X32-AVX1-NEXT: vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
4675 ; X32-AVX1-NEXT: vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
4676 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,1,0,1]
4677 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4679 ; X32-AVX512-LABEL: test_mm_set1_epi64x:
4680 ; X32-AVX512: # %bb.0:
4681 ; X32-AVX512-NEXT: vpbroadcastq %rdi, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xc7]
4682 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4683 %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0
4684 %res1 = insertelement <2 x i64> %res0, i64 %a0, i32 1
4688 define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
4689 ; X86-SSE-LABEL: test_mm_set1_pd:
4691 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
4692 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
4693 ; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4694 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
4695 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4697 ; X86-AVX1-LABEL: test_mm_set1_pd:
4698 ; X86-AVX1: # %bb.0:
4699 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
4700 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
4701 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4702 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
4703 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4705 ; X86-AVX512-LABEL: test_mm_set1_pd:
4706 ; X86-AVX512: # %bb.0:
4707 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
4708 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
4709 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4710 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
4711 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4713 ; X64-SSE-LABEL: test_mm_set1_pd:
4715 ; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4716 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
4717 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4719 ; X64-AVX1-LABEL: test_mm_set1_pd:
4720 ; X64-AVX1: # %bb.0:
4721 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4722 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
4723 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4725 ; X64-AVX512-LABEL: test_mm_set1_pd:
4726 ; X64-AVX512: # %bb.0:
4727 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4728 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
4729 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
4731 ; X32-SSE-LABEL: test_mm_set1_pd:
4733 ; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
4734 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
4735 ; X32-SSE-NEXT: retq # encoding: [0xc3]
4737 ; X32-AVX1-LABEL: test_mm_set1_pd:
4738 ; X32-AVX1: # %bb.0:
4739 ; X32-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
4740 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0]
4741 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
4743 ; X32-AVX512-LABEL: test_mm_set1_pd:
4744 ; X32-AVX512: # %bb.0:
4745 ; X32-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
4746 ; X32-AVX512-NEXT: # xmm0 = xmm0[0,0]
4747 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
4748 %res0 = insertelement <2 x double> undef, double %a0, i32 0
4749 %res1 = insertelement <2 x double> %res0, double %a0, i32 1
4750 ret <2 x double> %res1
4753 define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
4754 ; X86-SSE-LABEL: test_mm_setr_epi8:
4756 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
4757 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4758 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
4759 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
4760 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
4761 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
4762 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
4763 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4764 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
4765 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4766 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4767 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4768 ; X86-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
4769 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
4770 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
4771 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4772 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
4773 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
4774 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
4775 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
4776 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
4777 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4778 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
4779 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
4780 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
4781 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
4782 ; X86-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
4783 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
4784 ; X86-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
4785 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
4786 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
4787 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4788 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
4789 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4790 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4791 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4792 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
4793 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4794 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
4795 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
4796 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
4797 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
4798 ; X86-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
4799 ; X86-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
4800 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
4801 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4802 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
4803 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4804 ; X86-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4805 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4806 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
4807 ; X86-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
4808 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
4809 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4810 ; X86-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
4811 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
4812 ; X86-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
4813 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
4814 ; X86-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
4815 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4816 ; X86-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4817 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4818 ; X86-SSE-NEXT: retl # encoding: [0xc3]
4820 ; X86-AVX1-LABEL: test_mm_setr_epi8:
4821 ; X86-AVX1: # %bb.0:
4822 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
4823 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x04]
4824 ; X86-AVX1-NEXT: vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
4825 ; X86-AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
4826 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
4827 ; X86-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
4828 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
4829 ; X86-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
4830 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
4831 ; X86-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
4832 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
4833 ; X86-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
4834 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
4835 ; X86-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
4836 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
4837 ; X86-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
4838 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
4839 ; X86-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
4840 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
4841 ; X86-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
4842 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
4843 ; X86-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
4844 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
4845 ; X86-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
4846 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
4847 ; X86-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
4848 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
4849 ; X86-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
4850 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
4851 ; X86-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
4852 ; X86-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
4853 ; X86-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
4854 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
4856 ; X86-AVX512-LABEL: test_mm_setr_epi8:
4857 ; X86-AVX512: # %bb.0:
4858 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
4859 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x04]
4860 ; X86-AVX512-NEXT: vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
4861 ; X86-AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
4862 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
4863 ; X86-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
4864 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
4865 ; X86-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
4866 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
4867 ; X86-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
4868 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
4869 ; X86-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
4870 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
4871 ; X86-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
4872 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
4873 ; X86-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
4874 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
4875 ; X86-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
4876 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
4877 ; X86-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
4878 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
4879 ; X86-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
4880 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
4881 ; X86-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
4882 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
4883 ; X86-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
4884 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
4885 ; X86-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
4886 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
4887 ; X86-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
4888 ; X86-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
4889 ; X86-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
4890 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
4892 ; X64-SSE-LABEL: test_mm_setr_epi8:
4894 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
4895 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4896 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
4897 ; X64-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
4898 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
4899 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
4900 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
4901 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4902 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
4903 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4904 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4905 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4906 ; X64-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
4907 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
4908 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
4909 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4910 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
4911 ; X64-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
4912 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
4913 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
4914 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
4915 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4916 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
4917 ; X64-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
4918 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
4919 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
4920 ; X64-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
4921 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
4922 ; X64-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
4923 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
4924 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
4925 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4926 ; X64-SSE-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
4927 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4928 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4929 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4930 ; X64-SSE-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
4931 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4932 ; X64-SSE-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
4933 ; X64-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
4934 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
4935 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
4936 ; X64-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
4937 ; X64-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
4938 ; X64-SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
4939 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4940 ; X64-SSE-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
4941 ; X64-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
4942 ; X64-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
4943 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
4944 ; X64-SSE-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
4945 ; X64-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
4946 ; X64-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
4947 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
4948 ; X64-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
4949 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
4950 ; X64-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
4951 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
4952 ; X64-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
4953 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4954 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
4955 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
4956 ; X64-SSE-NEXT: retq # encoding: [0xc3]
4958 ; X64-AVX1-LABEL: test_mm_setr_epi8:
4959 ; X64-AVX1: # %bb.0:
4960 ; X64-AVX1-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
4961 ; X64-AVX1-NEXT: movzbl %dil, %esi # encoding: [0x40,0x0f,0xb6,0xf7]
4962 ; X64-AVX1-NEXT: vmovd %esi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc6]
4963 ; X64-AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
4964 ; X64-AVX1-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
4965 ; X64-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
4966 ; X64-AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
4967 ; X64-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
4968 ; X64-AVX1-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
4969 ; X64-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
4970 ; X64-AVX1-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
4971 ; X64-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
4972 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
4973 ; X64-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
4974 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
4975 ; X64-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
4976 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
4977 ; X64-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
4978 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
4979 ; X64-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
4980 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
4981 ; X64-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
4982 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
4983 ; X64-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
4984 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
4985 ; X64-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
4986 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
4987 ; X64-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
4988 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
4989 ; X64-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
4990 ; X64-AVX1-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
4991 ; X64-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
4992 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
4994 ; X64-AVX512-LABEL: test_mm_setr_epi8:
4995 ; X64-AVX512: # %bb.0:
4996 ; X64-AVX512-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
4997 ; X64-AVX512-NEXT: movzbl %dil, %esi # encoding: [0x40,0x0f,0xb6,0xf7]
4998 ; X64-AVX512-NEXT: vmovd %esi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc6]
4999 ; X64-AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
5000 ; X64-AVX512-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
5001 ; X64-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
5002 ; X64-AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
5003 ; X64-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
5004 ; X64-AVX512-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
5005 ; X64-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
5006 ; X64-AVX512-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
5007 ; X64-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
5008 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
5009 ; X64-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
5010 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
5011 ; X64-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
5012 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
5013 ; X64-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
5014 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
5015 ; X64-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
5016 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
5017 ; X64-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
5018 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
5019 ; X64-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
5020 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
5021 ; X64-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
5022 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
5023 ; X64-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
5024 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
5025 ; X64-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
5026 ; X64-AVX512-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
5027 ; X64-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
5028 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
5030 ; X32-SSE-LABEL: test_mm_setr_epi8:
5032 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
5033 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5034 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
5035 ; X32-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
5036 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
5037 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
5038 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
5039 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5040 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
5041 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
5042 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
5043 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
5044 ; X32-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
5045 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5046 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
5047 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5048 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
5049 ; X32-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
5050 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
5051 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
5052 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
5053 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5054 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
5055 ; X32-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
5056 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
5057 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
5058 ; X32-SSE-NEXT: punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
5059 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
5060 ; X32-SSE-NEXT: punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
5061 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
5062 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
5063 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5064 ; X32-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
5065 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
5066 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
5067 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
5068 ; X32-SSE-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
5069 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5070 ; X32-SSE-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
5071 ; X32-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
5072 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
5073 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
5074 ; X32-SSE-NEXT: punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
5075 ; X32-SSE-NEXT: # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
5076 ; X32-SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
5077 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5078 ; X32-SSE-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
5079 ; X32-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
5080 ; X32-SSE-NEXT: punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
5081 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
5082 ; X32-SSE-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
5083 ; X32-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
5084 ; X32-SSE-NEXT: movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
5085 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5086 ; X32-SSE-NEXT: punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
5087 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
5088 ; X32-SSE-NEXT: punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
5089 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5090 ; X32-SSE-NEXT: punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
5091 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5092 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
5093 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5094 ; X32-SSE-NEXT: retq # encoding: [0xc3]
5096 ; X32-AVX1-LABEL: test_mm_setr_epi8:
5097 ; X32-AVX1: # %bb.0:
5098 ; X32-AVX1-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
5099 ; X32-AVX1-NEXT: movzbl %dil, %esi # encoding: [0x40,0x0f,0xb6,0xf7]
5100 ; X32-AVX1-NEXT: vmovd %esi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc6]
5101 ; X32-AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
5102 ; X32-AVX1-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
5103 ; X32-AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
5104 ; X32-AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
5105 ; X32-AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
5106 ; X32-AVX1-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
5107 ; X32-AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
5108 ; X32-AVX1-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
5109 ; X32-AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
5110 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
5111 ; X32-AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
5112 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
5113 ; X32-AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
5114 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
5115 ; X32-AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
5116 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
5117 ; X32-AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
5118 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
5119 ; X32-AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
5120 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
5121 ; X32-AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
5122 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
5123 ; X32-AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
5124 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
5125 ; X32-AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
5126 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
5127 ; X32-AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
5128 ; X32-AVX1-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
5129 ; X32-AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
5130 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
5132 ; X32-AVX512-LABEL: test_mm_setr_epi8:
5133 ; X32-AVX512: # %bb.0:
5134 ; X32-AVX512-NEXT: movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
5135 ; X32-AVX512-NEXT: movzbl %dil, %esi # encoding: [0x40,0x0f,0xb6,0xf7]
5136 ; X32-AVX512-NEXT: vmovd %esi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc6]
5137 ; X32-AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
5138 ; X32-AVX512-NEXT: movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
5139 ; X32-AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
5140 ; X32-AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
5141 ; X32-AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
5142 ; X32-AVX512-NEXT: movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
5143 ; X32-AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
5144 ; X32-AVX512-NEXT: movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
5145 ; X32-AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
5146 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
5147 ; X32-AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
5148 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
5149 ; X32-AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
5150 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
5151 ; X32-AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
5152 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
5153 ; X32-AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
5154 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
5155 ; X32-AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
5156 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
5157 ; X32-AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
5158 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
5159 ; X32-AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
5160 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
5161 ; X32-AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
5162 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
5163 ; X32-AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
5164 ; X32-AVX512-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
5165 ; X32-AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
5166 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
5167 %res0 = insertelement <16 x i8> undef, i8 %a0 , i32 0
5168 %res1 = insertelement <16 x i8> %res0, i8 %a1 , i32 1
5169 %res2 = insertelement <16 x i8> %res1, i8 %a2 , i32 2
5170 %res3 = insertelement <16 x i8> %res2, i8 %a3 , i32 3
5171 %res4 = insertelement <16 x i8> %res3, i8 %a4 , i32 4
5172 %res5 = insertelement <16 x i8> %res4, i8 %a5 , i32 5
5173 %res6 = insertelement <16 x i8> %res5, i8 %a6 , i32 6
5174 %res7 = insertelement <16 x i8> %res6, i8 %a7 , i32 7
5175 %res8 = insertelement <16 x i8> %res7, i8 %a8 , i32 8
5176 %res9 = insertelement <16 x i8> %res8, i8 %a9 , i32 9
5177 %res10 = insertelement <16 x i8> %res9, i8 %a10, i32 10
5178 %res11 = insertelement <16 x i8> %res10, i8 %a11, i32 11
5179 %res12 = insertelement <16 x i8> %res11, i8 %a12, i32 12
5180 %res13 = insertelement <16 x i8> %res12, i8 %a13, i32 13
5181 %res14 = insertelement <16 x i8> %res13, i8 %a14, i32 14
5182 %res15 = insertelement <16 x i8> %res14, i8 %a15, i32 15
5183 %res = bitcast <16 x i8> %res15 to <2 x i64>
5187 define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
5188 ; X86-SSE-LABEL: test_mm_setr_epi16:
5190 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
5191 ; X86-SSE-NEXT: movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
5192 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
5193 ; X86-SSE-NEXT: movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
5194 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
5195 ; X86-SSE-NEXT: movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
5196 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
5197 ; X86-SSE-NEXT: movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
5198 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
5199 ; X86-SSE-NEXT: movd %eax, %xmm5 # encoding: [0x66,0x0f,0x6e,0xe8]
5200 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
5201 ; X86-SSE-NEXT: movd %eax, %xmm6 # encoding: [0x66,0x0f,0x6e,0xf0]
5202 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
5203 ; X86-SSE-NEXT: movd %eax, %xmm7 # encoding: [0x66,0x0f,0x6e,0xf8]
5204 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
5205 ; X86-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5206 ; X86-SSE-NEXT: punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
5207 ; X86-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5208 ; X86-SSE-NEXT: punpcklwd %xmm3, %xmm4 # encoding: [0x66,0x0f,0x61,0xe3]
5209 ; X86-SSE-NEXT: # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
5210 ; X86-SSE-NEXT: punpckldq %xmm2, %xmm4 # encoding: [0x66,0x0f,0x62,0xe2]
5211 ; X86-SSE-NEXT: # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
5212 ; X86-SSE-NEXT: punpcklwd %xmm5, %xmm6 # encoding: [0x66,0x0f,0x61,0xf5]
5213 ; X86-SSE-NEXT: # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
5214 ; X86-SSE-NEXT: punpcklwd %xmm7, %xmm0 # encoding: [0x66,0x0f,0x61,0xc7]
5215 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
5216 ; X86-SSE-NEXT: punpckldq %xmm6, %xmm0 # encoding: [0x66,0x0f,0x62,0xc6]
5217 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
5218 ; X86-SSE-NEXT: punpcklqdq %xmm4, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc4]
5219 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm4[0]
5220 ; X86-SSE-NEXT: retl # encoding: [0xc3]
5222 ; X86-AVX1-LABEL: test_mm_setr_epi16:
5223 ; X86-AVX1: # %bb.0:
5224 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
5225 ; X86-AVX1-NEXT: vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
5226 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
5227 ; X86-AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
5228 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
5229 ; X86-AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
5230 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
5231 ; X86-AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
5232 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
5233 ; X86-AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
5234 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
5235 ; X86-AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
5236 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
5237 ; X86-AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5238 ; X86-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
5239 ; X86-AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
5240 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
5242 ; X86-AVX512-LABEL: test_mm_setr_epi16:
5243 ; X86-AVX512: # %bb.0:
5244 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
5245 ; X86-AVX512-NEXT: vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
5246 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
5247 ; X86-AVX512-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
5248 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
5249 ; X86-AVX512-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
5250 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
5251 ; X86-AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
5252 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
5253 ; X86-AVX512-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
5254 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
5255 ; X86-AVX512-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
5256 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
5257 ; X86-AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5258 ; X86-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
5259 ; X86-AVX512-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
5260 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
5262 ; X64-SSE-LABEL: test_mm_setr_epi16:
5264 ; X64-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
5265 ; X64-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
5266 ; X64-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5267 ; X64-SSE-NEXT: movd %r10d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xca]
5268 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
5269 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5270 ; X64-SSE-NEXT: movd %r9d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc1]
5271 ; X64-SSE-NEXT: movd %r8d, %xmm2 # encoding: [0x66,0x41,0x0f,0x6e,0xd0]
5272 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
5273 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
5274 ; X64-SSE-NEXT: punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
5275 ; X64-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
5276 ; X64-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
5277 ; X64-SSE-NEXT: movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
5278 ; X64-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
5279 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5280 ; X64-SSE-NEXT: movd %esi, %xmm3 # encoding: [0x66,0x0f,0x6e,0xde]
5281 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
5282 ; X64-SSE-NEXT: punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
5283 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
5284 ; X64-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
5285 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5286 ; X64-SSE-NEXT: punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
5287 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0]
5288 ; X64-SSE-NEXT: retq # encoding: [0xc3]
5290 ; X64-AVX1-LABEL: test_mm_setr_epi16:
5291 ; X64-AVX1: # %bb.0:
5292 ; X64-AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x10]
5293 ; X64-AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
5294 ; X64-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
5295 ; X64-AVX1-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
5296 ; X64-AVX1-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
5297 ; X64-AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
5298 ; X64-AVX1-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
5299 ; X64-AVX1-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
5300 ; X64-AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5301 ; X64-AVX1-NEXT: vpinsrw $7, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x07]
5302 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
5304 ; X64-AVX512-LABEL: test_mm_setr_epi16:
5305 ; X64-AVX512: # %bb.0:
5306 ; X64-AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x10]
5307 ; X64-AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
5308 ; X64-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
5309 ; X64-AVX512-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
5310 ; X64-AVX512-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
5311 ; X64-AVX512-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
5312 ; X64-AVX512-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
5313 ; X64-AVX512-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
5314 ; X64-AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5315 ; X64-AVX512-NEXT: vpinsrw $7, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x07]
5316 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
5318 ; X32-SSE-LABEL: test_mm_setr_epi16:
5320 ; X32-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
5321 ; X32-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
5322 ; X32-SSE-NEXT: movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
5323 ; X32-SSE-NEXT: movd %r10d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xca]
5324 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
5325 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5326 ; X32-SSE-NEXT: movd %r9d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc1]
5327 ; X32-SSE-NEXT: movd %r8d, %xmm2 # encoding: [0x66,0x41,0x0f,0x6e,0xd0]
5328 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
5329 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
5330 ; X32-SSE-NEXT: punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
5331 ; X32-SSE-NEXT: # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
5332 ; X32-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
5333 ; X32-SSE-NEXT: movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
5334 ; X32-SSE-NEXT: punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
5335 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5336 ; X32-SSE-NEXT: movd %esi, %xmm3 # encoding: [0x66,0x0f,0x6e,0xde]
5337 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
5338 ; X32-SSE-NEXT: punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
5339 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
5340 ; X32-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
5341 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5342 ; X32-SSE-NEXT: punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
5343 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0]
5344 ; X32-SSE-NEXT: retq # encoding: [0xc3]
5346 ; X32-AVX1-LABEL: test_mm_setr_epi16:
5347 ; X32-AVX1: # %bb.0:
5348 ; X32-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x10]
5349 ; X32-AVX1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x08]
5350 ; X32-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
5351 ; X32-AVX1-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
5352 ; X32-AVX1-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
5353 ; X32-AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
5354 ; X32-AVX1-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
5355 ; X32-AVX1-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
5356 ; X32-AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5357 ; X32-AVX1-NEXT: vpinsrw $7, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x07]
5358 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
5360 ; X32-AVX512-LABEL: test_mm_setr_epi16:
5361 ; X32-AVX512: # %bb.0:
5362 ; X32-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x10]
5363 ; X32-AVX512-NEXT: movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x08]
5364 ; X32-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
5365 ; X32-AVX512-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
5366 ; X32-AVX512-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
5367 ; X32-AVX512-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
5368 ; X32-AVX512-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
5369 ; X32-AVX512-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
5370 ; X32-AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
5371 ; X32-AVX512-NEXT: vpinsrw $7, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x07]
5372 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
5373 %res0 = insertelement <8 x i16> undef, i16 %a0, i32 0
5374 %res1 = insertelement <8 x i16> %res0, i16 %a1, i32 1
5375 %res2 = insertelement <8 x i16> %res1, i16 %a2, i32 2
5376 %res3 = insertelement <8 x i16> %res2, i16 %a3, i32 3
5377 %res4 = insertelement <8 x i16> %res3, i16 %a4, i32 4
5378 %res5 = insertelement <8 x i16> %res4, i16 %a5, i32 5
5379 %res6 = insertelement <8 x i16> %res5, i16 %a6, i32 6
5380 %res7 = insertelement <8 x i16> %res6, i16 %a7, i32 7
5381 %res = bitcast <8 x i16> %res7 to <2 x i64>
5385 define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
5386 ; X86-SSE-LABEL: test_mm_setr_epi32:
5388 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
5389 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
5390 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
5391 ; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
5392 ; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
5393 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5394 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
5395 ; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
5396 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
5397 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
5398 ; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
5399 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
5400 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
5401 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5402 ; X86-SSE-NEXT: retl # encoding: [0xc3]
5404 ; X86-AVX1-LABEL: test_mm_setr_epi32:
5405 ; X86-AVX1: # %bb.0:
5406 ; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
5407 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
5408 ; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
5409 ; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
5410 ; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
5411 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
5413 ; X86-AVX512-LABEL: test_mm_setr_epi32:
5414 ; X86-AVX512: # %bb.0:
5415 ; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
5416 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
5417 ; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
5418 ; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
5419 ; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
5420 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
5422 ; X64-SSE-LABEL: test_mm_setr_epi32:
5424 ; X64-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
5425 ; X64-SSE-NEXT: movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
5426 ; X64-SSE-NEXT: punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
5427 ; X64-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5428 ; X64-SSE-NEXT: movd %esi, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd6]
5429 ; X64-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
5430 ; X64-SSE-NEXT: punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
5431 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
5432 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
5433 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5434 ; X64-SSE-NEXT: retq # encoding: [0xc3]
5436 ; X64-AVX1-LABEL: test_mm_setr_epi32:
5437 ; X64-AVX1: # %bb.0:
5438 ; X64-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
5439 ; X64-AVX1-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
5440 ; X64-AVX1-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
5441 ; X64-AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
5442 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
5444 ; X64-AVX512-LABEL: test_mm_setr_epi32:
5445 ; X64-AVX512: # %bb.0:
5446 ; X64-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
5447 ; X64-AVX512-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
5448 ; X64-AVX512-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
5449 ; X64-AVX512-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
5450 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
5452 ; X32-SSE-LABEL: test_mm_setr_epi32:
5454 ; X32-SSE-NEXT: movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
5455 ; X32-SSE-NEXT: movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
5456 ; X32-SSE-NEXT: punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
5457 ; X32-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5458 ; X32-SSE-NEXT: movd %esi, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd6]
5459 ; X32-SSE-NEXT: movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
5460 ; X32-SSE-NEXT: punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
5461 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
5462 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
5463 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5464 ; X32-SSE-NEXT: retq # encoding: [0xc3]
5466 ; X32-AVX1-LABEL: test_mm_setr_epi32:
5467 ; X32-AVX1: # %bb.0:
5468 ; X32-AVX1-NEXT: vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
5469 ; X32-AVX1-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
5470 ; X32-AVX1-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
5471 ; X32-AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
5472 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
5474 ; X32-AVX512-LABEL: test_mm_setr_epi32:
5475 ; X32-AVX512: # %bb.0:
5476 ; X32-AVX512-NEXT: vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
5477 ; X32-AVX512-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
5478 ; X32-AVX512-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
5479 ; X32-AVX512-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
5480 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
5481 %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
5482 %res1 = insertelement <4 x i32> %res0, i32 %a1, i32 1
5483 %res2 = insertelement <4 x i32> %res1, i32 %a2, i32 2
5484 %res3 = insertelement <4 x i32> %res2, i32 %a3, i32 3
5485 %res = bitcast <4 x i32> %res3 to <2 x i64>
5489 ; TODO test_mm_setr_epi64
5491 define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
5492 ; X86-SSE-LABEL: test_mm_setr_epi64x:
5494 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
5495 ; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
5496 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
5497 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
5498 ; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
5499 ; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5500 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
5501 ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
5502 ; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
5503 ; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
5504 ; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
5505 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
5506 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
5507 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5508 ; X86-SSE-NEXT: retl # encoding: [0xc3]
5510 ; X86-AVX1-LABEL: test_mm_setr_epi64x:
5511 ; X86-AVX1: # %bb.0:
5512 ; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
5513 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
5514 ; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
5515 ; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
5516 ; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
5517 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
5519 ; X86-AVX512-LABEL: test_mm_setr_epi64x:
5520 ; X86-AVX512: # %bb.0:
5521 ; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
5522 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
5523 ; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
5524 ; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
5525 ; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
5526 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
5528 ; X64-SSE-LABEL: test_mm_setr_epi64x:
5530 ; X64-SSE-NEXT: movq %rsi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xce]
5531 ; X64-SSE-NEXT: movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
5532 ; X64-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
5533 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5534 ; X64-SSE-NEXT: retq # encoding: [0xc3]
5536 ; X64-AVX1-LABEL: test_mm_setr_epi64x:
5537 ; X64-AVX1: # %bb.0:
5538 ; X64-AVX1-NEXT: vmovq %rsi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
5539 ; X64-AVX1-NEXT: vmovq %rdi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
5540 ; X64-AVX1-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
5541 ; X64-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
5542 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
5544 ; X64-AVX512-LABEL: test_mm_setr_epi64x:
5545 ; X64-AVX512: # %bb.0:
5546 ; X64-AVX512-NEXT: vmovq %rsi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
5547 ; X64-AVX512-NEXT: vmovq %rdi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
5548 ; X64-AVX512-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
5549 ; X64-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
5550 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
5552 ; X32-SSE-LABEL: test_mm_setr_epi64x:
5554 ; X32-SSE-NEXT: movq %rsi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xce]
5555 ; X32-SSE-NEXT: movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
5556 ; X32-SSE-NEXT: punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
5557 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5558 ; X32-SSE-NEXT: retq # encoding: [0xc3]
5560 ; X32-AVX1-LABEL: test_mm_setr_epi64x:
5561 ; X32-AVX1: # %bb.0:
5562 ; X32-AVX1-NEXT: vmovq %rsi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
5563 ; X32-AVX1-NEXT: vmovq %rdi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
5564 ; X32-AVX1-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
5565 ; X32-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
5566 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
5568 ; X32-AVX512-LABEL: test_mm_setr_epi64x:
5569 ; X32-AVX512: # %bb.0:
5570 ; X32-AVX512-NEXT: vmovq %rsi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
5571 ; X32-AVX512-NEXT: vmovq %rdi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
5572 ; X32-AVX512-NEXT: vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
5573 ; X32-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
5574 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
5575 %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0
5576 %res1 = insertelement <2 x i64> %res0, i64 %a1, i32 1
5580 define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
5581 ; X86-SSE-LABEL: test_mm_setr_pd:
5583 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x0c]
5584 ; X86-SSE-NEXT: # xmm1 = mem[0],zero
5585 ; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
5586 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
5587 ; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
5588 ; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5589 ; X86-SSE-NEXT: retl # encoding: [0xc3]
5591 ; X86-AVX1-LABEL: test_mm_setr_pd:
5592 ; X86-AVX1: # %bb.0:
5593 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
5594 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
5595 ; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
5596 ; X86-AVX1-NEXT: # xmm1 = mem[0],zero
5597 ; X86-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
5598 ; X86-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
5599 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
5601 ; X86-AVX512-LABEL: test_mm_setr_pd:
5602 ; X86-AVX512: # %bb.0:
5603 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
5604 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
5605 ; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
5606 ; X86-AVX512-NEXT: # xmm1 = mem[0],zero
5607 ; X86-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
5608 ; X86-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
5609 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
5611 ; X64-SSE-LABEL: test_mm_setr_pd:
5613 ; X64-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
5614 ; X64-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5615 ; X64-SSE-NEXT: retq # encoding: [0xc3]
5617 ; X64-AVX1-LABEL: test_mm_setr_pd:
5618 ; X64-AVX1: # %bb.0:
5619 ; X64-AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
5620 ; X64-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
5621 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
5623 ; X64-AVX512-LABEL: test_mm_setr_pd:
5624 ; X64-AVX512: # %bb.0:
5625 ; X64-AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
5626 ; X64-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
5627 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
5629 ; X32-SSE-LABEL: test_mm_setr_pd:
5631 ; X32-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
5632 ; X32-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
5633 ; X32-SSE-NEXT: retq # encoding: [0xc3]
5635 ; X32-AVX1-LABEL: test_mm_setr_pd:
5636 ; X32-AVX1: # %bb.0:
5637 ; X32-AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
5638 ; X32-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
5639 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
5641 ; X32-AVX512-LABEL: test_mm_setr_pd:
5642 ; X32-AVX512: # %bb.0:
5643 ; X32-AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
5644 ; X32-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
5645 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
5646 %res0 = insertelement <2 x double> undef, double %a0, i32 0
5647 %res1 = insertelement <2 x double> %res0, double %a1, i32 1
5648 ret <2 x double> %res1
5651 define <2 x double> @test_mm_setzero_pd() {
5652 ; SSE-LABEL: test_mm_setzero_pd:
5654 ; SSE-NEXT: xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
5655 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5657 ; AVX1-LABEL: test_mm_setzero_pd:
5659 ; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
5660 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5662 ; AVX512-LABEL: test_mm_setzero_pd:
5664 ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc0]
5665 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5666 ret <2 x double> zeroinitializer
5669 define <2 x i64> @test_mm_setzero_si128() {
5670 ; SSE-LABEL: test_mm_setzero_si128:
5672 ; SSE-NEXT: xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
5673 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5675 ; AVX1-LABEL: test_mm_setzero_si128:
5677 ; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
5678 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5680 ; AVX512-LABEL: test_mm_setzero_si128:
5682 ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc0]
5683 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5684 ret <2 x i64> zeroinitializer
5687 define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
5688 ; SSE-LABEL: test_mm_shuffle_epi32:
5690 ; SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
5691 ; SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
5692 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5694 ; AVX1-LABEL: test_mm_shuffle_epi32:
5696 ; AVX1-NEXT: vpermilps $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x00]
5697 ; AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
5698 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5700 ; AVX512-LABEL: test_mm_shuffle_epi32:
5702 ; AVX512-NEXT: vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
5703 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5704 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
5705 %res = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
5706 %bc = bitcast <4 x i32> %res to <2 x i64>
5710 define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
5711 ; SSE-LABEL: test_mm_shuffle_pd:
5713 ; SSE-NEXT: shufps $78, %xmm1, %xmm0 # encoding: [0x0f,0xc6,0xc1,0x4e]
5714 ; SSE-NEXT: # xmm0 = xmm0[2,3],xmm1[0,1]
5715 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5717 ; AVX1-LABEL: test_mm_shuffle_pd:
5719 ; AVX1-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc1,0x01]
5720 ; AVX1-NEXT: # xmm0 = xmm0[1],xmm1[0]
5721 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5723 ; AVX512-LABEL: test_mm_shuffle_pd:
5725 ; AVX512-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc1,0x01]
5726 ; AVX512-NEXT: # xmm0 = xmm0[1],xmm1[0]
5727 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5728 %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
5729 ret <2 x double> %res
5732 define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
5733 ; SSE-LABEL: test_mm_shufflehi_epi16:
5735 ; SSE-NEXT: pshufhw $0, %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x70,0xc0,0x00]
5736 ; SSE-NEXT: # xmm0 = xmm0[0,1,2,3,4,4,4,4]
5737 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5739 ; AVX1-LABEL: test_mm_shufflehi_epi16:
5741 ; AVX1-NEXT: vpshufhw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x70,0xc0,0x00]
5742 ; AVX1-NEXT: # xmm0 = xmm0[0,1,2,3,4,4,4,4]
5743 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5745 ; AVX512-LABEL: test_mm_shufflehi_epi16:
5747 ; AVX512-NEXT: vpshufhw $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xc0,0x00]
5748 ; AVX512-NEXT: # xmm0 = xmm0[0,1,2,3,4,4,4,4]
5749 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5750 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
5751 %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
5752 %bc = bitcast <8 x i16> %res to <2 x i64>
5756 define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
5757 ; SSE-LABEL: test_mm_shufflelo_epi16:
5759 ; SSE-NEXT: pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
5760 ; SSE-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
5761 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5763 ; AVX1-LABEL: test_mm_shufflelo_epi16:
5765 ; AVX1-NEXT: vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
5766 ; AVX1-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
5767 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5769 ; AVX512-LABEL: test_mm_shufflelo_epi16:
5771 ; AVX512-NEXT: vpshuflw $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc0,0x00]
5772 ; AVX512-NEXT: # xmm0 = xmm0[0,0,0,0,4,5,6,7]
5773 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5774 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
5775 %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
5776 %bc = bitcast <8 x i16> %res to <2 x i64>
5780 define <2 x i64> @test_mm_sll_epi16(<2 x i64> %a0, <2 x i64> %a1) {
5781 ; SSE-LABEL: test_mm_sll_epi16:
5783 ; SSE-NEXT: psllw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf1,0xc1]
5784 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5786 ; AVX1-LABEL: test_mm_sll_epi16:
5788 ; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf1,0xc1]
5789 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5791 ; AVX512-LABEL: test_mm_sll_epi16:
5793 ; AVX512-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
5794 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5795 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
5796 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
5797 %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %arg0, <8 x i16> %arg1)
5798 %bc = bitcast <8 x i16> %res to <2 x i64>
5801 declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
5803 define <2 x i64> @test_mm_sll_epi32(<2 x i64> %a0, <2 x i64> %a1) {
5804 ; SSE-LABEL: test_mm_sll_epi32:
5806 ; SSE-NEXT: pslld %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf2,0xc1]
5807 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5809 ; AVX1-LABEL: test_mm_sll_epi32:
5811 ; AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf2,0xc1]
5812 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5814 ; AVX512-LABEL: test_mm_sll_epi32:
5816 ; AVX512-NEXT: vpslld %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
5817 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5818 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
5819 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
5820 %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %arg0, <4 x i32> %arg1)
5821 %bc = bitcast <4 x i32> %res to <2 x i64>
5824 declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
5826 define <2 x i64> @test_mm_sll_epi64(<2 x i64> %a0, <2 x i64> %a1) {
5827 ; SSE-LABEL: test_mm_sll_epi64:
5829 ; SSE-NEXT: psllq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf3,0xc1]
5830 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5832 ; AVX1-LABEL: test_mm_sll_epi64:
5834 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf3,0xc1]
5835 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5837 ; AVX512-LABEL: test_mm_sll_epi64:
5839 ; AVX512-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
5840 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5841 %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
5844 declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
5846 define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
5847 ; SSE-LABEL: test_mm_slli_epi16:
5849 ; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01]
5850 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5852 ; AVX1-LABEL: test_mm_slli_epi16:
5854 ; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01]
5855 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5857 ; AVX512-LABEL: test_mm_slli_epi16:
5859 ; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01]
5860 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5861 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
5862 %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1)
5863 %bc = bitcast <8 x i16> %res to <2 x i64>
5866 declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
5868 define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
5869 ; SSE-LABEL: test_mm_slli_epi32:
5871 ; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01]
5872 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5874 ; AVX1-LABEL: test_mm_slli_epi32:
5876 ; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01]
5877 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5879 ; AVX512-LABEL: test_mm_slli_epi32:
5881 ; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01]
5882 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5883 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
5884 %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1)
5885 %bc = bitcast <4 x i32> %res to <2 x i64>
5888 declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
5890 define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
5891 ; SSE-LABEL: test_mm_slli_epi64:
5893 ; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01]
5894 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5896 ; AVX1-LABEL: test_mm_slli_epi64:
5898 ; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01]
5899 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5901 ; AVX512-LABEL: test_mm_slli_epi64:
5903 ; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01]
5904 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5905 %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
5908 declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
5910 define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
5911 ; SSE-LABEL: test_mm_slli_si128:
5913 ; SSE-NEXT: pslldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xf8,0x05]
5914 ; SSE-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
5915 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5917 ; AVX1-LABEL: test_mm_slli_si128:
5919 ; AVX1-NEXT: vpslldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf8,0x05]
5920 ; AVX1-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
5921 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5923 ; AVX512-LABEL: test_mm_slli_si128:
5925 ; AVX512-NEXT: vpslldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x05]
5926 ; AVX512-NEXT: # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
5927 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5928 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
5929 %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
5930 %bc = bitcast <16 x i8> %res to <2 x i64>
5934 define <2 x double> @test_mm_sqrt_pd(<2 x double> %a0) nounwind {
5935 ; SSE-LABEL: test_mm_sqrt_pd:
5937 ; SSE-NEXT: sqrtpd %xmm0, %xmm0 # encoding: [0x66,0x0f,0x51,0xc0]
5938 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5940 ; AVX1-LABEL: test_mm_sqrt_pd:
5942 ; AVX1-NEXT: vsqrtpd %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x51,0xc0]
5943 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5945 ; AVX512-LABEL: test_mm_sqrt_pd:
5947 ; AVX512-NEXT: vsqrtpd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
5948 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5949 %res = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a0)
5950 ret <2 x double> %res
5952 declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) nounwind readnone
5954 define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
5955 ; SSE-LABEL: test_mm_sqrt_sd:
5957 ; SSE-NEXT: sqrtsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x51,0xc8]
5958 ; SSE-NEXT: movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
5959 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5961 ; AVX1-LABEL: test_mm_sqrt_sd:
5963 ; AVX1-NEXT: vsqrtsd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf3,0x51,0xc0]
5964 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5966 ; AVX512-LABEL: test_mm_sqrt_sd:
5968 ; AVX512-NEXT: vsqrtsd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf3,0x51,0xc0]
5969 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
5970 %ext = extractelement <2 x double> %a0, i32 0
5971 %sqrt = call double @llvm.sqrt.f64(double %ext)
5972 %ins = insertelement <2 x double> %a1, double %sqrt, i32 0
5973 ret <2 x double> %ins
5975 declare double @llvm.sqrt.f64(double) nounwind readnone
5977 ; This doesn't match a clang test, but helps with fast-isel coverage.
5978 define double @test_mm_sqrt_sd_scalar(double %a0) nounwind {
5979 ; X86-SSE-LABEL: test_mm_sqrt_sd_scalar:
5981 ; X86-SSE-NEXT: pushl %ebp # encoding: [0x55]
5982 ; X86-SSE-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
5983 ; X86-SSE-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
5984 ; X86-SSE-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
5985 ; X86-SSE-NEXT: movsd 8(%ebp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x45,0x08]
5986 ; X86-SSE-NEXT: # xmm0 = mem[0],zero
5987 ; X86-SSE-NEXT: sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
5988 ; X86-SSE-NEXT: movsd %xmm0, (%esp) # encoding: [0xf2,0x0f,0x11,0x04,0x24]
5989 ; X86-SSE-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
5990 ; X86-SSE-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
5991 ; X86-SSE-NEXT: popl %ebp # encoding: [0x5d]
5992 ; X86-SSE-NEXT: retl # encoding: [0xc3]
5994 ; X86-AVX1-LABEL: test_mm_sqrt_sd_scalar:
5995 ; X86-AVX1: # %bb.0:
5996 ; X86-AVX1-NEXT: pushl %ebp # encoding: [0x55]
5997 ; X86-AVX1-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
5998 ; X86-AVX1-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
5999 ; X86-AVX1-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
6000 ; X86-AVX1-NEXT: vmovsd 8(%ebp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x45,0x08]
6001 ; X86-AVX1-NEXT: # xmm0 = mem[0],zero
6002 ; X86-AVX1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
6003 ; X86-AVX1-NEXT: vmovsd %xmm0, (%esp) # encoding: [0xc5,0xfb,0x11,0x04,0x24]
6004 ; X86-AVX1-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
6005 ; X86-AVX1-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
6006 ; X86-AVX1-NEXT: popl %ebp # encoding: [0x5d]
6007 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6009 ; X86-AVX512-LABEL: test_mm_sqrt_sd_scalar:
6010 ; X86-AVX512: # %bb.0:
6011 ; X86-AVX512-NEXT: pushl %ebp # encoding: [0x55]
6012 ; X86-AVX512-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
6013 ; X86-AVX512-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
6014 ; X86-AVX512-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
6015 ; X86-AVX512-NEXT: vmovsd 8(%ebp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x45,0x08]
6016 ; X86-AVX512-NEXT: # xmm0 = mem[0],zero
6017 ; X86-AVX512-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
6018 ; X86-AVX512-NEXT: vmovsd %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x04,0x24]
6019 ; X86-AVX512-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
6020 ; X86-AVX512-NEXT: movl %ebp, %esp # encoding: [0x89,0xec]
6021 ; X86-AVX512-NEXT: popl %ebp # encoding: [0x5d]
6022 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6024 ; X64-SSE-LABEL: test_mm_sqrt_sd_scalar:
6026 ; X64-SSE-NEXT: sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
6027 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6029 ; X64-AVX1-LABEL: test_mm_sqrt_sd_scalar:
6030 ; X64-AVX1: # %bb.0:
6031 ; X64-AVX1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
6032 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6034 ; X64-AVX512-LABEL: test_mm_sqrt_sd_scalar:
6035 ; X64-AVX512: # %bb.0:
6036 ; X64-AVX512-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
6037 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6039 ; X32-SSE-LABEL: test_mm_sqrt_sd_scalar:
6041 ; X32-SSE-NEXT: sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
6042 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6044 ; X32-AVX1-LABEL: test_mm_sqrt_sd_scalar:
6045 ; X32-AVX1: # %bb.0:
6046 ; X32-AVX1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
6047 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6049 ; X32-AVX512-LABEL: test_mm_sqrt_sd_scalar:
6050 ; X32-AVX512: # %bb.0:
6051 ; X32-AVX512-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
6052 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6053 %sqrt = call double @llvm.sqrt.f64(double %a0)
6057 define <2 x i64> @test_mm_sra_epi16(<2 x i64> %a0, <2 x i64> %a1) {
6058 ; SSE-LABEL: test_mm_sra_epi16:
6060 ; SSE-NEXT: psraw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe1,0xc1]
6061 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6063 ; AVX1-LABEL: test_mm_sra_epi16:
6065 ; AVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe1,0xc1]
6066 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6068 ; AVX512-LABEL: test_mm_sra_epi16:
6070 ; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
6071 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6072 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
6073 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
6074 %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %arg0, <8 x i16> %arg1)
6075 %bc = bitcast <8 x i16> %res to <2 x i64>
6078 declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
6080 define <2 x i64> @test_mm_sra_epi32(<2 x i64> %a0, <2 x i64> %a1) {
6081 ; SSE-LABEL: test_mm_sra_epi32:
6083 ; SSE-NEXT: psrad %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe2,0xc1]
6084 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6086 ; AVX1-LABEL: test_mm_sra_epi32:
6088 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe2,0xc1]
6089 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6091 ; AVX512-LABEL: test_mm_sra_epi32:
6093 ; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
6094 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6095 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
6096 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
6097 %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %arg0, <4 x i32> %arg1)
6098 %bc = bitcast <4 x i32> %res to <2 x i64>
6101 declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
6103 define <2 x i64> @test_mm_srai_epi16(<2 x i64> %a0) {
6104 ; SSE-LABEL: test_mm_srai_epi16:
6106 ; SSE-NEXT: psraw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xe0,0x01]
6107 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6109 ; AVX1-LABEL: test_mm_srai_epi16:
6111 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xe0,0x01]
6112 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6114 ; AVX512-LABEL: test_mm_srai_epi16:
6116 ; AVX512-NEXT: vpsraw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x01]
6117 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6118 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
6119 %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %arg0, i32 1)
6120 %bc = bitcast <8 x i16> %res to <2 x i64>
6123 declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
6125 define <2 x i64> @test_mm_srai_epi32(<2 x i64> %a0) {
6126 ; SSE-LABEL: test_mm_srai_epi32:
6128 ; SSE-NEXT: psrad $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xe0,0x01]
6129 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6131 ; AVX1-LABEL: test_mm_srai_epi32:
6133 ; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xe0,0x01]
6134 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6136 ; AVX512-LABEL: test_mm_srai_epi32:
6138 ; AVX512-NEXT: vpsrad $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x01]
6139 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6140 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
6141 %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %arg0, i32 1)
6142 %bc = bitcast <4 x i32> %res to <2 x i64>
6145 declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
6147 define <2 x i64> @test_mm_srl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
6148 ; SSE-LABEL: test_mm_srl_epi16:
6150 ; SSE-NEXT: psrlw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd1,0xc1]
6151 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6153 ; AVX1-LABEL: test_mm_srl_epi16:
6155 ; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd1,0xc1]
6156 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6158 ; AVX512-LABEL: test_mm_srl_epi16:
6160 ; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
6161 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6162 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
6163 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
6164 %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %arg0, <8 x i16> %arg1)
6165 %bc = bitcast <8 x i16> %res to <2 x i64>
6168 declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
6170 define <2 x i64> @test_mm_srl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
6171 ; SSE-LABEL: test_mm_srl_epi32:
6173 ; SSE-NEXT: psrld %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd2,0xc1]
6174 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6176 ; AVX1-LABEL: test_mm_srl_epi32:
6178 ; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd2,0xc1]
6179 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6181 ; AVX512-LABEL: test_mm_srl_epi32:
6183 ; AVX512-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
6184 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6185 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
6186 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
6187 %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %arg0, <4 x i32> %arg1)
6188 %bc = bitcast <4 x i32> %res to <2 x i64>
6191 declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
6193 define <2 x i64> @test_mm_srl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
6194 ; SSE-LABEL: test_mm_srl_epi64:
6196 ; SSE-NEXT: psrlq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd3,0xc1]
6197 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6199 ; AVX1-LABEL: test_mm_srl_epi64:
6201 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd3,0xc1]
6202 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6204 ; AVX512-LABEL: test_mm_srl_epi64:
6206 ; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
6207 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6208 %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
6211 declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
6213 define <2 x i64> @test_mm_srli_epi16(<2 x i64> %a0) {
6214 ; SSE-LABEL: test_mm_srli_epi16:
6216 ; SSE-NEXT: psrlw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xd0,0x01]
6217 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6219 ; AVX1-LABEL: test_mm_srli_epi16:
6221 ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xd0,0x01]
6222 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6224 ; AVX512-LABEL: test_mm_srli_epi16:
6226 ; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x01]
6227 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6228 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
6229 %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %arg0, i32 1)
6230 %bc = bitcast <8 x i16> %res to <2 x i64>
6233 declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
6235 define <2 x i64> @test_mm_srli_epi32(<2 x i64> %a0) {
6236 ; SSE-LABEL: test_mm_srli_epi32:
6238 ; SSE-NEXT: psrld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xd0,0x01]
6239 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6241 ; AVX1-LABEL: test_mm_srli_epi32:
6243 ; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xd0,0x01]
6244 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6246 ; AVX512-LABEL: test_mm_srli_epi32:
6248 ; AVX512-NEXT: vpsrld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x01]
6249 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6250 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
6251 %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %arg0, i32 1)
6252 %bc = bitcast <4 x i32> %res to <2 x i64>
6255 declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
6257 define <2 x i64> @test_mm_srli_epi64(<2 x i64> %a0) {
6258 ; SSE-LABEL: test_mm_srli_epi64:
6260 ; SSE-NEXT: psrlq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xd0,0x01]
6261 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6263 ; AVX1-LABEL: test_mm_srli_epi64:
6265 ; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd0,0x01]
6266 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6268 ; AVX512-LABEL: test_mm_srli_epi64:
6270 ; AVX512-NEXT: vpsrlq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x01]
6271 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6272 %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 1)
6275 declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
6277 define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
6278 ; SSE-LABEL: test_mm_srli_si128:
6280 ; SSE-NEXT: psrldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xd8,0x05]
6281 ; SSE-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
6282 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6284 ; AVX1-LABEL: test_mm_srli_si128:
6286 ; AVX1-NEXT: vpsrldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd8,0x05]
6287 ; AVX1-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
6288 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6290 ; AVX512-LABEL: test_mm_srli_si128:
6292 ; AVX512-NEXT: vpsrldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x05]
6293 ; AVX512-NEXT: # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
6294 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
6295 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
6296 %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
6297 %bc = bitcast <16 x i8> %res to <2 x i64>
6301 define void @test_mm_store_pd(double *%a0, <2 x double> %a1) {
6302 ; X86-SSE-LABEL: test_mm_store_pd:
6304 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6305 ; X86-SSE-NEXT: movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
6306 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6308 ; X86-AVX1-LABEL: test_mm_store_pd:
6309 ; X86-AVX1: # %bb.0:
6310 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6311 ; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
6312 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6314 ; X86-AVX512-LABEL: test_mm_store_pd:
6315 ; X86-AVX512: # %bb.0:
6316 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6317 ; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
6318 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6320 ; X64-SSE-LABEL: test_mm_store_pd:
6322 ; X64-SSE-NEXT: movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
6323 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6325 ; X64-AVX1-LABEL: test_mm_store_pd:
6326 ; X64-AVX1: # %bb.0:
6327 ; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
6328 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6330 ; X64-AVX512-LABEL: test_mm_store_pd:
6331 ; X64-AVX512: # %bb.0:
6332 ; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
6333 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6335 ; X32-SSE-LABEL: test_mm_store_pd:
6337 ; X32-SSE-NEXT: movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
6338 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6340 ; X32-AVX1-LABEL: test_mm_store_pd:
6341 ; X32-AVX1: # %bb.0:
6342 ; X32-AVX1-NEXT: vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
6343 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6345 ; X32-AVX512-LABEL: test_mm_store_pd:
6346 ; X32-AVX512: # %bb.0:
6347 ; X32-AVX512-NEXT: vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
6348 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6349 %arg0 = bitcast double* %a0 to <2 x double>*
6350 store <2 x double> %a1, <2 x double>* %arg0, align 16
6354 define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
6355 ; X86-SSE-LABEL: test_mm_store_pd1:
6357 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6358 ; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6359 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
6360 ; X86-SSE-NEXT: movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
6361 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6363 ; X86-AVX1-LABEL: test_mm_store_pd1:
6364 ; X86-AVX1: # %bb.0:
6365 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6366 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6367 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
6368 ; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
6369 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6371 ; X86-AVX512-LABEL: test_mm_store_pd1:
6372 ; X86-AVX512: # %bb.0:
6373 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6374 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6375 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
6376 ; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
6377 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6379 ; X64-SSE-LABEL: test_mm_store_pd1:
6381 ; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6382 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
6383 ; X64-SSE-NEXT: movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
6384 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6386 ; X64-AVX1-LABEL: test_mm_store_pd1:
6387 ; X64-AVX1: # %bb.0:
6388 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6389 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
6390 ; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
6391 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6393 ; X64-AVX512-LABEL: test_mm_store_pd1:
6394 ; X64-AVX512: # %bb.0:
6395 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6396 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
6397 ; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
6398 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6400 ; X32-SSE-LABEL: test_mm_store_pd1:
6402 ; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6403 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
6404 ; X32-SSE-NEXT: movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
6405 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6407 ; X32-AVX1-LABEL: test_mm_store_pd1:
6408 ; X32-AVX1: # %bb.0:
6409 ; X32-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6410 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0]
6411 ; X32-AVX1-NEXT: vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
6412 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6414 ; X32-AVX512-LABEL: test_mm_store_pd1:
6415 ; X32-AVX512: # %bb.0:
6416 ; X32-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6417 ; X32-AVX512-NEXT: # xmm0 = xmm0[0,0]
6418 ; X32-AVX512-NEXT: vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
6419 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6420 %arg0 = bitcast double * %a0 to <2 x double>*
6421 %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
6422 store <2 x double> %shuf, <2 x double>* %arg0, align 16
6426 define void @test_mm_store_sd(double *%a0, <2 x double> %a1) {
6427 ; X86-SSE-LABEL: test_mm_store_sd:
6429 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6430 ; X86-SSE-NEXT: movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
6431 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6433 ; X86-AVX1-LABEL: test_mm_store_sd:
6434 ; X86-AVX1: # %bb.0:
6435 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6436 ; X86-AVX1-NEXT: vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
6437 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6439 ; X86-AVX512-LABEL: test_mm_store_sd:
6440 ; X86-AVX512: # %bb.0:
6441 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6442 ; X86-AVX512-NEXT: vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
6443 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6445 ; X64-SSE-LABEL: test_mm_store_sd:
6447 ; X64-SSE-NEXT: movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
6448 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6450 ; X64-AVX1-LABEL: test_mm_store_sd:
6451 ; X64-AVX1: # %bb.0:
6452 ; X64-AVX1-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
6453 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6455 ; X64-AVX512-LABEL: test_mm_store_sd:
6456 ; X64-AVX512: # %bb.0:
6457 ; X64-AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
6458 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6460 ; X32-SSE-LABEL: test_mm_store_sd:
6462 ; X32-SSE-NEXT: movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
6463 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6465 ; X32-AVX1-LABEL: test_mm_store_sd:
6466 ; X32-AVX1: # %bb.0:
6467 ; X32-AVX1-NEXT: vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
6468 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6470 ; X32-AVX512-LABEL: test_mm_store_sd:
6471 ; X32-AVX512: # %bb.0:
6472 ; X32-AVX512-NEXT: vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
6473 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6474 %ext = extractelement <2 x double> %a1, i32 0
6475 store double %ext, double* %a0, align 1
6479 define void @test_mm_store_si128(<2 x i64> *%a0, <2 x i64> %a1) {
6480 ; X86-SSE-LABEL: test_mm_store_si128:
6482 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6483 ; X86-SSE-NEXT: movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
6484 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6486 ; X86-AVX1-LABEL: test_mm_store_si128:
6487 ; X86-AVX1: # %bb.0:
6488 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6489 ; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
6490 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6492 ; X86-AVX512-LABEL: test_mm_store_si128:
6493 ; X86-AVX512: # %bb.0:
6494 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6495 ; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
6496 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6498 ; X64-SSE-LABEL: test_mm_store_si128:
6500 ; X64-SSE-NEXT: movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
6501 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6503 ; X64-AVX1-LABEL: test_mm_store_si128:
6504 ; X64-AVX1: # %bb.0:
6505 ; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
6506 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6508 ; X64-AVX512-LABEL: test_mm_store_si128:
6509 ; X64-AVX512: # %bb.0:
6510 ; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
6511 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6513 ; X32-SSE-LABEL: test_mm_store_si128:
6515 ; X32-SSE-NEXT: movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
6516 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6518 ; X32-AVX1-LABEL: test_mm_store_si128:
6519 ; X32-AVX1: # %bb.0:
6520 ; X32-AVX1-NEXT: vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
6521 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6523 ; X32-AVX512-LABEL: test_mm_store_si128:
6524 ; X32-AVX512: # %bb.0:
6525 ; X32-AVX512-NEXT: vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
6526 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6527 store <2 x i64> %a1, <2 x i64>* %a0, align 16
6531 define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
6532 ; X86-SSE-LABEL: test_mm_store1_pd:
6534 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6535 ; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6536 ; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
6537 ; X86-SSE-NEXT: movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
6538 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6540 ; X86-AVX1-LABEL: test_mm_store1_pd:
6541 ; X86-AVX1: # %bb.0:
6542 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6543 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6544 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
6545 ; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
6546 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6548 ; X86-AVX512-LABEL: test_mm_store1_pd:
6549 ; X86-AVX512: # %bb.0:
6550 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6551 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6552 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
6553 ; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
6554 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6556 ; X64-SSE-LABEL: test_mm_store1_pd:
6558 ; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6559 ; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
6560 ; X64-SSE-NEXT: movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
6561 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6563 ; X64-AVX1-LABEL: test_mm_store1_pd:
6564 ; X64-AVX1: # %bb.0:
6565 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6566 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
6567 ; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
6568 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6570 ; X64-AVX512-LABEL: test_mm_store1_pd:
6571 ; X64-AVX512: # %bb.0:
6572 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6573 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
6574 ; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
6575 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6577 ; X32-SSE-LABEL: test_mm_store1_pd:
6579 ; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
6580 ; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
6581 ; X32-SSE-NEXT: movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
6582 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6584 ; X32-AVX1-LABEL: test_mm_store1_pd:
6585 ; X32-AVX1: # %bb.0:
6586 ; X32-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
6587 ; X32-AVX1-NEXT: # xmm0 = xmm0[0,0]
6588 ; X32-AVX1-NEXT: vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
6589 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6591 ; X32-AVX512-LABEL: test_mm_store1_pd:
6592 ; X32-AVX512: # %bb.0:
6593 ; X32-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
6594 ; X32-AVX512-NEXT: # xmm0 = xmm0[0,0]
6595 ; X32-AVX512-NEXT: vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
6596 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6597 %arg0 = bitcast double * %a0 to <2 x double>*
6598 %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
6599 store <2 x double> %shuf, <2 x double>* %arg0, align 16
6603 define void @test_mm_storeh_sd(double *%a0, <2 x double> %a1) {
6604 ; X86-SSE-LABEL: test_mm_storeh_sd:
6606 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6607 ; X86-SSE-NEXT: movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
6608 ; X86-SSE-NEXT: # xmm0 = xmm0[1,1]
6609 ; X86-SSE-NEXT: movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
6610 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6612 ; X86-AVX1-LABEL: test_mm_storeh_sd:
6613 ; X86-AVX1: # %bb.0:
6614 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6615 ; X86-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6616 ; X86-AVX1-NEXT: # xmm0 = xmm0[1,0]
6617 ; X86-AVX1-NEXT: vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
6618 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6620 ; X86-AVX512-LABEL: test_mm_storeh_sd:
6621 ; X86-AVX512: # %bb.0:
6622 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6623 ; X86-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6624 ; X86-AVX512-NEXT: # xmm0 = xmm0[1,0]
6625 ; X86-AVX512-NEXT: vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
6626 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6628 ; X64-SSE-LABEL: test_mm_storeh_sd:
6630 ; X64-SSE-NEXT: movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
6631 ; X64-SSE-NEXT: # xmm0 = xmm0[1,1]
6632 ; X64-SSE-NEXT: movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
6633 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6635 ; X64-AVX1-LABEL: test_mm_storeh_sd:
6636 ; X64-AVX1: # %bb.0:
6637 ; X64-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6638 ; X64-AVX1-NEXT: # xmm0 = xmm0[1,0]
6639 ; X64-AVX1-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
6640 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6642 ; X64-AVX512-LABEL: test_mm_storeh_sd:
6643 ; X64-AVX512: # %bb.0:
6644 ; X64-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6645 ; X64-AVX512-NEXT: # xmm0 = xmm0[1,0]
6646 ; X64-AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
6647 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6649 ; X32-SSE-LABEL: test_mm_storeh_sd:
6651 ; X32-SSE-NEXT: movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
6652 ; X32-SSE-NEXT: # xmm0 = xmm0[1,1]
6653 ; X32-SSE-NEXT: movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
6654 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6656 ; X32-AVX1-LABEL: test_mm_storeh_sd:
6657 ; X32-AVX1: # %bb.0:
6658 ; X32-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6659 ; X32-AVX1-NEXT: # xmm0 = xmm0[1,0]
6660 ; X32-AVX1-NEXT: vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
6661 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6663 ; X32-AVX512-LABEL: test_mm_storeh_sd:
6664 ; X32-AVX512: # %bb.0:
6665 ; X32-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6666 ; X32-AVX512-NEXT: # xmm0 = xmm0[1,0]
6667 ; X32-AVX512-NEXT: vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
6668 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6669 %ext = extractelement <2 x double> %a1, i32 1
6670 store double %ext, double* %a0, align 8
6674 define void @test_mm_storel_epi64(<2 x i64> *%a0, <2 x i64> %a1) {
6675 ; X86-SSE-LABEL: test_mm_storel_epi64:
6677 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6678 ; X86-SSE-NEXT: movlps %xmm0, (%eax) # encoding: [0x0f,0x13,0x00]
6679 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6681 ; X86-AVX1-LABEL: test_mm_storel_epi64:
6682 ; X86-AVX1: # %bb.0:
6683 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6684 ; X86-AVX1-NEXT: vmovlps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x13,0x00]
6685 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6687 ; X86-AVX512-LABEL: test_mm_storel_epi64:
6688 ; X86-AVX512: # %bb.0:
6689 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6690 ; X86-AVX512-NEXT: vmovlps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
6691 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6693 ; X64-SSE-LABEL: test_mm_storel_epi64:
6695 ; X64-SSE-NEXT: movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
6696 ; X64-SSE-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6697 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6699 ; X64-AVX1-LABEL: test_mm_storel_epi64:
6700 ; X64-AVX1: # %bb.0:
6701 ; X64-AVX1-NEXT: vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6702 ; X64-AVX1-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6703 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6705 ; X64-AVX512-LABEL: test_mm_storel_epi64:
6706 ; X64-AVX512: # %bb.0:
6707 ; X64-AVX512-NEXT: vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6708 ; X64-AVX512-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6709 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6711 ; X32-SSE-LABEL: test_mm_storel_epi64:
6713 ; X32-SSE-NEXT: movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
6714 ; X32-SSE-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
6715 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6717 ; X32-AVX1-LABEL: test_mm_storel_epi64:
6718 ; X32-AVX1: # %bb.0:
6719 ; X32-AVX1-NEXT: vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6720 ; X32-AVX1-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
6721 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6723 ; X32-AVX512-LABEL: test_mm_storel_epi64:
6724 ; X32-AVX512: # %bb.0:
6725 ; X32-AVX512-NEXT: vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6726 ; X32-AVX512-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
6727 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6728 %ext = extractelement <2 x i64> %a1, i32 0
6729 %bc = bitcast <2 x i64> *%a0 to i64*
6730 store i64 %ext, i64* %bc, align 8
6734 define void @test_mm_storel_sd(double *%a0, <2 x double> %a1) {
6735 ; X86-SSE-LABEL: test_mm_storel_sd:
6737 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6738 ; X86-SSE-NEXT: movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
6739 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6741 ; X86-AVX1-LABEL: test_mm_storel_sd:
6742 ; X86-AVX1: # %bb.0:
6743 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6744 ; X86-AVX1-NEXT: vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
6745 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6747 ; X86-AVX512-LABEL: test_mm_storel_sd:
6748 ; X86-AVX512: # %bb.0:
6749 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6750 ; X86-AVX512-NEXT: vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
6751 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6753 ; X64-SSE-LABEL: test_mm_storel_sd:
6755 ; X64-SSE-NEXT: movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
6756 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6758 ; X64-AVX1-LABEL: test_mm_storel_sd:
6759 ; X64-AVX1: # %bb.0:
6760 ; X64-AVX1-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
6761 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6763 ; X64-AVX512-LABEL: test_mm_storel_sd:
6764 ; X64-AVX512: # %bb.0:
6765 ; X64-AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
6766 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6768 ; X32-SSE-LABEL: test_mm_storel_sd:
6770 ; X32-SSE-NEXT: movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
6771 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6773 ; X32-AVX1-LABEL: test_mm_storel_sd:
6774 ; X32-AVX1: # %bb.0:
6775 ; X32-AVX1-NEXT: vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
6776 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6778 ; X32-AVX512-LABEL: test_mm_storel_sd:
6779 ; X32-AVX512: # %bb.0:
6780 ; X32-AVX512-NEXT: vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
6781 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6782 %ext = extractelement <2 x double> %a1, i32 0
6783 store double %ext, double* %a0, align 8
6787 define void @test_mm_storer_pd(double *%a0, <2 x double> %a1) {
6788 ; X86-SSE-LABEL: test_mm_storer_pd:
6790 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6791 ; X86-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
6792 ; X86-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
6793 ; X86-SSE-NEXT: movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
6794 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6796 ; X86-AVX1-LABEL: test_mm_storer_pd:
6797 ; X86-AVX1: # %bb.0:
6798 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6799 ; X86-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6800 ; X86-AVX1-NEXT: # xmm0 = xmm0[1,0]
6801 ; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
6802 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6804 ; X86-AVX512-LABEL: test_mm_storer_pd:
6805 ; X86-AVX512: # %bb.0:
6806 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6807 ; X86-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6808 ; X86-AVX512-NEXT: # xmm0 = xmm0[1,0]
6809 ; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
6810 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6812 ; X64-SSE-LABEL: test_mm_storer_pd:
6814 ; X64-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
6815 ; X64-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
6816 ; X64-SSE-NEXT: movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
6817 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6819 ; X64-AVX1-LABEL: test_mm_storer_pd:
6820 ; X64-AVX1: # %bb.0:
6821 ; X64-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6822 ; X64-AVX1-NEXT: # xmm0 = xmm0[1,0]
6823 ; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
6824 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6826 ; X64-AVX512-LABEL: test_mm_storer_pd:
6827 ; X64-AVX512: # %bb.0:
6828 ; X64-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6829 ; X64-AVX512-NEXT: # xmm0 = xmm0[1,0]
6830 ; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
6831 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6833 ; X32-SSE-LABEL: test_mm_storer_pd:
6835 ; X32-SSE-NEXT: shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
6836 ; X32-SSE-NEXT: # xmm0 = xmm0[2,3,0,1]
6837 ; X32-SSE-NEXT: movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
6838 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6840 ; X32-AVX1-LABEL: test_mm_storer_pd:
6841 ; X32-AVX1: # %bb.0:
6842 ; X32-AVX1-NEXT: vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6843 ; X32-AVX1-NEXT: # xmm0 = xmm0[1,0]
6844 ; X32-AVX1-NEXT: vmovapd %xmm0, (%edi) # encoding: [0x67,0xc5,0xf9,0x29,0x07]
6845 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6847 ; X32-AVX512-LABEL: test_mm_storer_pd:
6848 ; X32-AVX512: # %bb.0:
6849 ; X32-AVX512-NEXT: vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
6850 ; X32-AVX512-NEXT: # xmm0 = xmm0[1,0]
6851 ; X32-AVX512-NEXT: vmovapd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf9,0x29,0x07]
6852 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6853 %arg0 = bitcast double* %a0 to <2 x double>*
6854 %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> <i32 1, i32 0>
6855 store <2 x double> %shuf, <2 x double>* %arg0, align 16
6859 define void @test_mm_storeu_pd(double *%a0, <2 x double> %a1) {
6860 ; X86-SSE-LABEL: test_mm_storeu_pd:
6862 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6863 ; X86-SSE-NEXT: movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
6864 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6866 ; X86-AVX1-LABEL: test_mm_storeu_pd:
6867 ; X86-AVX1: # %bb.0:
6868 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6869 ; X86-AVX1-NEXT: vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
6870 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6872 ; X86-AVX512-LABEL: test_mm_storeu_pd:
6873 ; X86-AVX512: # %bb.0:
6874 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6875 ; X86-AVX512-NEXT: vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
6876 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6878 ; X64-SSE-LABEL: test_mm_storeu_pd:
6880 ; X64-SSE-NEXT: movups %xmm0, (%rdi) # encoding: [0x0f,0x11,0x07]
6881 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6883 ; X64-AVX1-LABEL: test_mm_storeu_pd:
6884 ; X64-AVX1: # %bb.0:
6885 ; X64-AVX1-NEXT: vmovups %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x11,0x07]
6886 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6888 ; X64-AVX512-LABEL: test_mm_storeu_pd:
6889 ; X64-AVX512: # %bb.0:
6890 ; X64-AVX512-NEXT: vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
6891 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6893 ; X32-SSE-LABEL: test_mm_storeu_pd:
6895 ; X32-SSE-NEXT: movups %xmm0, (%edi) # encoding: [0x67,0x0f,0x11,0x07]
6896 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6898 ; X32-AVX1-LABEL: test_mm_storeu_pd:
6899 ; X32-AVX1: # %bb.0:
6900 ; X32-AVX1-NEXT: vmovups %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x11,0x07]
6901 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6903 ; X32-AVX512-LABEL: test_mm_storeu_pd:
6904 ; X32-AVX512: # %bb.0:
6905 ; X32-AVX512-NEXT: vmovups %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x11,0x07]
6906 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6907 %arg0 = bitcast double* %a0 to <2 x double>*
6908 store <2 x double> %a1, <2 x double>* %arg0, align 1
6912 define void @test_mm_storeu_si128(<2 x i64> *%a0, <2 x i64> %a1) {
6913 ; X86-SSE-LABEL: test_mm_storeu_si128:
6915 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6916 ; X86-SSE-NEXT: movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
6917 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6919 ; X86-AVX1-LABEL: test_mm_storeu_si128:
6920 ; X86-AVX1: # %bb.0:
6921 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6922 ; X86-AVX1-NEXT: vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
6923 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6925 ; X86-AVX512-LABEL: test_mm_storeu_si128:
6926 ; X86-AVX512: # %bb.0:
6927 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6928 ; X86-AVX512-NEXT: vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
6929 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6931 ; X64-SSE-LABEL: test_mm_storeu_si128:
6933 ; X64-SSE-NEXT: movups %xmm0, (%rdi) # encoding: [0x0f,0x11,0x07]
6934 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6936 ; X64-AVX1-LABEL: test_mm_storeu_si128:
6937 ; X64-AVX1: # %bb.0:
6938 ; X64-AVX1-NEXT: vmovups %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x11,0x07]
6939 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6941 ; X64-AVX512-LABEL: test_mm_storeu_si128:
6942 ; X64-AVX512: # %bb.0:
6943 ; X64-AVX512-NEXT: vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
6944 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
6946 ; X32-SSE-LABEL: test_mm_storeu_si128:
6948 ; X32-SSE-NEXT: movups %xmm0, (%edi) # encoding: [0x67,0x0f,0x11,0x07]
6949 ; X32-SSE-NEXT: retq # encoding: [0xc3]
6951 ; X32-AVX1-LABEL: test_mm_storeu_si128:
6952 ; X32-AVX1: # %bb.0:
6953 ; X32-AVX1-NEXT: vmovups %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x11,0x07]
6954 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
6956 ; X32-AVX512-LABEL: test_mm_storeu_si128:
6957 ; X32-AVX512: # %bb.0:
6958 ; X32-AVX512-NEXT: vmovups %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x11,0x07]
6959 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
6960 store <2 x i64> %a1, <2 x i64>* %a0, align 1
6964 define void @test_mm_storeu_si64(i8* nocapture %A, <2 x i64> %B) {
6965 ; X86-SSE-LABEL: test_mm_storeu_si64:
6966 ; X86-SSE: # %bb.0: # %entry
6967 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6968 ; X86-SSE-NEXT: movlps %xmm0, (%eax) # encoding: [0x0f,0x13,0x00]
6969 ; X86-SSE-NEXT: retl # encoding: [0xc3]
6971 ; X86-AVX1-LABEL: test_mm_storeu_si64:
6972 ; X86-AVX1: # %bb.0: # %entry
6973 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6974 ; X86-AVX1-NEXT: vmovlps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x13,0x00]
6975 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
6977 ; X86-AVX512-LABEL: test_mm_storeu_si64:
6978 ; X86-AVX512: # %bb.0: # %entry
6979 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
6980 ; X86-AVX512-NEXT: vmovlps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
6981 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
6983 ; X64-SSE-LABEL: test_mm_storeu_si64:
6984 ; X64-SSE: # %bb.0: # %entry
6985 ; X64-SSE-NEXT: movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
6986 ; X64-SSE-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6987 ; X64-SSE-NEXT: retq # encoding: [0xc3]
6989 ; X64-AVX1-LABEL: test_mm_storeu_si64:
6990 ; X64-AVX1: # %bb.0: # %entry
6991 ; X64-AVX1-NEXT: vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6992 ; X64-AVX1-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6993 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
6995 ; X64-AVX512-LABEL: test_mm_storeu_si64:
6996 ; X64-AVX512: # %bb.0: # %entry
6997 ; X64-AVX512-NEXT: vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
6998 ; X64-AVX512-NEXT: movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
6999 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
7001 ; X32-SSE-LABEL: test_mm_storeu_si64:
7002 ; X32-SSE: # %bb.0: # %entry
7003 ; X32-SSE-NEXT: movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
7004 ; X32-SSE-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
7005 ; X32-SSE-NEXT: retq # encoding: [0xc3]
7007 ; X32-AVX1-LABEL: test_mm_storeu_si64:
7008 ; X32-AVX1: # %bb.0: # %entry
7009 ; X32-AVX1-NEXT: vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
7010 ; X32-AVX1-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
7011 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
7013 ; X32-AVX512-LABEL: test_mm_storeu_si64:
7014 ; X32-AVX512: # %bb.0: # %entry
7015 ; X32-AVX512-NEXT: vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
7016 ; X32-AVX512-NEXT: movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
7017 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
7019 %vecext.i = extractelement <2 x i64> %B, i32 0
7020 %__v.i = bitcast i8* %A to i64*
7021 store i64 %vecext.i, i64* %__v.i, align 1
7025 define void @test_mm_storeu_si32(i8* nocapture %A, <2 x i64> %B) {
7026 ; X86-SSE-LABEL: test_mm_storeu_si32:
7027 ; X86-SSE: # %bb.0: # %entry
7028 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7029 ; X86-SSE-NEXT: movd %xmm0, %ecx # encoding: [0x66,0x0f,0x7e,0xc1]
7030 ; X86-SSE-NEXT: movl %ecx, (%eax) # encoding: [0x89,0x08]
7031 ; X86-SSE-NEXT: retl # encoding: [0xc3]
7033 ; X86-AVX1-LABEL: test_mm_storeu_si32:
7034 ; X86-AVX1: # %bb.0: # %entry
7035 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7036 ; X86-AVX1-NEXT: vmovd %xmm0, %ecx # encoding: [0xc5,0xf9,0x7e,0xc1]
7037 ; X86-AVX1-NEXT: movl %ecx, (%eax) # encoding: [0x89,0x08]
7038 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
7040 ; X86-AVX512-LABEL: test_mm_storeu_si32:
7041 ; X86-AVX512: # %bb.0: # %entry
7042 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7043 ; X86-AVX512-NEXT: vmovd %xmm0, %ecx # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc1]
7044 ; X86-AVX512-NEXT: movl %ecx, (%eax) # encoding: [0x89,0x08]
7045 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
7047 ; X64-SSE-LABEL: test_mm_storeu_si32:
7048 ; X64-SSE: # %bb.0: # %entry
7049 ; X64-SSE-NEXT: movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
7050 ; X64-SSE-NEXT: movl %eax, (%rdi) # encoding: [0x89,0x07]
7051 ; X64-SSE-NEXT: retq # encoding: [0xc3]
7053 ; X64-AVX1-LABEL: test_mm_storeu_si32:
7054 ; X64-AVX1: # %bb.0: # %entry
7055 ; X64-AVX1-NEXT: vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
7056 ; X64-AVX1-NEXT: movl %eax, (%rdi) # encoding: [0x89,0x07]
7057 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
7059 ; X64-AVX512-LABEL: test_mm_storeu_si32:
7060 ; X64-AVX512: # %bb.0: # %entry
7061 ; X64-AVX512-NEXT: vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
7062 ; X64-AVX512-NEXT: movl %eax, (%rdi) # encoding: [0x89,0x07]
7063 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
7065 ; X32-SSE-LABEL: test_mm_storeu_si32:
7066 ; X32-SSE: # %bb.0: # %entry
7067 ; X32-SSE-NEXT: movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
7068 ; X32-SSE-NEXT: movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
7069 ; X32-SSE-NEXT: retq # encoding: [0xc3]
7071 ; X32-AVX1-LABEL: test_mm_storeu_si32:
7072 ; X32-AVX1: # %bb.0: # %entry
7073 ; X32-AVX1-NEXT: vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
7074 ; X32-AVX1-NEXT: movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
7075 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
7077 ; X32-AVX512-LABEL: test_mm_storeu_si32:
7078 ; X32-AVX512: # %bb.0: # %entry
7079 ; X32-AVX512-NEXT: vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
7080 ; X32-AVX512-NEXT: movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
7081 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
7083 %0 = bitcast <2 x i64> %B to <4 x i32>
7084 %vecext.i = extractelement <4 x i32> %0, i32 0
7085 %__v.i = bitcast i8* %A to i32*
7086 store i32 %vecext.i, i32* %__v.i, align 1
7090 define void @test_mm_storeu_si16(i8* nocapture %A, <2 x i64> %B) {
7091 ; X86-SSE-LABEL: test_mm_storeu_si16:
7092 ; X86-SSE: # %bb.0: # %entry
7093 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7094 ; X86-SSE-NEXT: movd %xmm0, %ecx # encoding: [0x66,0x0f,0x7e,0xc1]
7095 ; X86-SSE-NEXT: movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
7096 ; X86-SSE-NEXT: retl # encoding: [0xc3]
7098 ; X86-AVX1-LABEL: test_mm_storeu_si16:
7099 ; X86-AVX1: # %bb.0: # %entry
7100 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7101 ; X86-AVX1-NEXT: vmovd %xmm0, %ecx # encoding: [0xc5,0xf9,0x7e,0xc1]
7102 ; X86-AVX1-NEXT: movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
7103 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
7105 ; X86-AVX512-LABEL: test_mm_storeu_si16:
7106 ; X86-AVX512: # %bb.0: # %entry
7107 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7108 ; X86-AVX512-NEXT: vmovd %xmm0, %ecx # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc1]
7109 ; X86-AVX512-NEXT: movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
7110 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
7112 ; X64-SSE-LABEL: test_mm_storeu_si16:
7113 ; X64-SSE: # %bb.0: # %entry
7114 ; X64-SSE-NEXT: movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
7115 ; X64-SSE-NEXT: movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
7116 ; X64-SSE-NEXT: retq # encoding: [0xc3]
7118 ; X64-AVX1-LABEL: test_mm_storeu_si16:
7119 ; X64-AVX1: # %bb.0: # %entry
7120 ; X64-AVX1-NEXT: vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
7121 ; X64-AVX1-NEXT: movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
7122 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
7124 ; X64-AVX512-LABEL: test_mm_storeu_si16:
7125 ; X64-AVX512: # %bb.0: # %entry
7126 ; X64-AVX512-NEXT: vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
7127 ; X64-AVX512-NEXT: movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
7128 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
7130 ; X32-SSE-LABEL: test_mm_storeu_si16:
7131 ; X32-SSE: # %bb.0: # %entry
7132 ; X32-SSE-NEXT: movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
7133 ; X32-SSE-NEXT: movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
7134 ; X32-SSE-NEXT: retq # encoding: [0xc3]
7136 ; X32-AVX1-LABEL: test_mm_storeu_si16:
7137 ; X32-AVX1: # %bb.0: # %entry
7138 ; X32-AVX1-NEXT: vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
7139 ; X32-AVX1-NEXT: movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
7140 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
7142 ; X32-AVX512-LABEL: test_mm_storeu_si16:
7143 ; X32-AVX512: # %bb.0: # %entry
7144 ; X32-AVX512-NEXT: vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
7145 ; X32-AVX512-NEXT: movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
7146 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
7148 %0 = bitcast <2 x i64> %B to <8 x i16>
7149 %vecext.i = extractelement <8 x i16> %0, i32 0
7150 %__v.i = bitcast i8* %A to i16*
7151 store i16 %vecext.i, i16* %__v.i, align 1
7155 define void @test_mm_stream_pd(double *%a0, <2 x double> %a1) {
7156 ; X86-SSE-LABEL: test_mm_stream_pd:
7158 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7159 ; X86-SSE-NEXT: movntps %xmm0, (%eax) # encoding: [0x0f,0x2b,0x00]
7160 ; X86-SSE-NEXT: retl # encoding: [0xc3]
7162 ; X86-AVX1-LABEL: test_mm_stream_pd:
7163 ; X86-AVX1: # %bb.0:
7164 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7165 ; X86-AVX1-NEXT: vmovntps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x2b,0x00]
7166 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
7168 ; X86-AVX512-LABEL: test_mm_stream_pd:
7169 ; X86-AVX512: # %bb.0:
7170 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7171 ; X86-AVX512-NEXT: vmovntps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x00]
7172 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
7174 ; X64-SSE-LABEL: test_mm_stream_pd:
7176 ; X64-SSE-NEXT: movntps %xmm0, (%rdi) # encoding: [0x0f,0x2b,0x07]
7177 ; X64-SSE-NEXT: retq # encoding: [0xc3]
7179 ; X64-AVX1-LABEL: test_mm_stream_pd:
7180 ; X64-AVX1: # %bb.0:
7181 ; X64-AVX1-NEXT: vmovntps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x2b,0x07]
7182 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
7184 ; X64-AVX512-LABEL: test_mm_stream_pd:
7185 ; X64-AVX512: # %bb.0:
7186 ; X64-AVX512-NEXT: vmovntps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
7187 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
7189 ; X32-SSE-LABEL: test_mm_stream_pd:
7191 ; X32-SSE-NEXT: movntps %xmm0, (%edi) # encoding: [0x67,0x0f,0x2b,0x07]
7192 ; X32-SSE-NEXT: retq # encoding: [0xc3]
7194 ; X32-AVX1-LABEL: test_mm_stream_pd:
7195 ; X32-AVX1: # %bb.0:
7196 ; X32-AVX1-NEXT: vmovntps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x2b,0x07]
7197 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
7199 ; X32-AVX512-LABEL: test_mm_stream_pd:
7200 ; X32-AVX512: # %bb.0:
7201 ; X32-AVX512-NEXT: vmovntps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x2b,0x07]
7202 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
7203 %arg0 = bitcast double* %a0 to <2 x double>*
7204 store <2 x double> %a1, <2 x double>* %arg0, align 16, !nontemporal !0
7208 define void @test_mm_stream_si32(i32 *%a0, i32 %a1) {
7209 ; X86-LABEL: test_mm_stream_si32:
7211 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
7212 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
7213 ; X86-NEXT: movntil %eax, (%ecx) # encoding: [0x0f,0xc3,0x01]
7214 ; X86-NEXT: retl # encoding: [0xc3]
7216 ; X64-LABEL: test_mm_stream_si32:
7218 ; X64-NEXT: movntil %esi, (%rdi) # encoding: [0x0f,0xc3,0x37]
7219 ; X64-NEXT: retq # encoding: [0xc3]
7221 ; X32-LABEL: test_mm_stream_si32:
7223 ; X32-NEXT: movntil %esi, (%edi) # encoding: [0x67,0x0f,0xc3,0x37]
7224 ; X32-NEXT: retq # encoding: [0xc3]
7225 store i32 %a1, i32* %a0, align 1, !nontemporal !0
7229 define void @test_mm_stream_si128(<2 x i64> *%a0, <2 x i64> %a1) {
7230 ; X86-SSE-LABEL: test_mm_stream_si128:
7232 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7233 ; X86-SSE-NEXT: movntps %xmm0, (%eax) # encoding: [0x0f,0x2b,0x00]
7234 ; X86-SSE-NEXT: retl # encoding: [0xc3]
7236 ; X86-AVX1-LABEL: test_mm_stream_si128:
7237 ; X86-AVX1: # %bb.0:
7238 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7239 ; X86-AVX1-NEXT: vmovntps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x2b,0x00]
7240 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
7242 ; X86-AVX512-LABEL: test_mm_stream_si128:
7243 ; X86-AVX512: # %bb.0:
7244 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
7245 ; X86-AVX512-NEXT: vmovntps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x00]
7246 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
7248 ; X64-SSE-LABEL: test_mm_stream_si128:
7250 ; X64-SSE-NEXT: movntps %xmm0, (%rdi) # encoding: [0x0f,0x2b,0x07]
7251 ; X64-SSE-NEXT: retq # encoding: [0xc3]
7253 ; X64-AVX1-LABEL: test_mm_stream_si128:
7254 ; X64-AVX1: # %bb.0:
7255 ; X64-AVX1-NEXT: vmovntps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x2b,0x07]
7256 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
7258 ; X64-AVX512-LABEL: test_mm_stream_si128:
7259 ; X64-AVX512: # %bb.0:
7260 ; X64-AVX512-NEXT: vmovntps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
7261 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
7263 ; X32-SSE-LABEL: test_mm_stream_si128:
7265 ; X32-SSE-NEXT: movntps %xmm0, (%edi) # encoding: [0x67,0x0f,0x2b,0x07]
7266 ; X32-SSE-NEXT: retq # encoding: [0xc3]
7268 ; X32-AVX1-LABEL: test_mm_stream_si128:
7269 ; X32-AVX1: # %bb.0:
7270 ; X32-AVX1-NEXT: vmovntps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x2b,0x07]
7271 ; X32-AVX1-NEXT: retq # encoding: [0xc3]
7273 ; X32-AVX512-LABEL: test_mm_stream_si128:
7274 ; X32-AVX512: # %bb.0:
7275 ; X32-AVX512-NEXT: vmovntps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x2b,0x07]
7276 ; X32-AVX512-NEXT: retq # encoding: [0xc3]
7277 store <2 x i64> %a1, <2 x i64>* %a0, align 16, !nontemporal !0
7281 define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7282 ; SSE-LABEL: test_mm_sub_epi8:
7284 ; SSE-NEXT: psubb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf8,0xc1]
7285 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7287 ; AVX1-LABEL: test_mm_sub_epi8:
7289 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
7290 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7292 ; AVX512-LABEL: test_mm_sub_epi8:
7294 ; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
7295 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7296 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
7297 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
7298 %res = sub <16 x i8> %arg0, %arg1
7299 %bc = bitcast <16 x i8> %res to <2 x i64>
7303 define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7304 ; SSE-LABEL: test_mm_sub_epi16:
7306 ; SSE-NEXT: psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
7307 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7309 ; AVX1-LABEL: test_mm_sub_epi16:
7311 ; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf9,0xc1]
7312 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7314 ; AVX512-LABEL: test_mm_sub_epi16:
7316 ; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
7317 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7318 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
7319 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
7320 %res = sub <8 x i16> %arg0, %arg1
7321 %bc = bitcast <8 x i16> %res to <2 x i64>
7325 define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7326 ; SSE-LABEL: test_mm_sub_epi32:
7328 ; SSE-NEXT: psubd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfa,0xc1]
7329 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7331 ; AVX1-LABEL: test_mm_sub_epi32:
7333 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfa,0xc1]
7334 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7336 ; AVX512-LABEL: test_mm_sub_epi32:
7338 ; AVX512-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
7339 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7340 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
7341 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
7342 %res = sub <4 x i32> %arg0, %arg1
7343 %bc = bitcast <4 x i32> %res to <2 x i64>
7347 define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7348 ; SSE-LABEL: test_mm_sub_epi64:
7350 ; SSE-NEXT: psubq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfb,0xc1]
7351 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7353 ; AVX1-LABEL: test_mm_sub_epi64:
7355 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
7356 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7358 ; AVX512-LABEL: test_mm_sub_epi64:
7360 ; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
7361 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7362 %res = sub <2 x i64> %a0, %a1
7366 define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
7367 ; SSE-LABEL: test_mm_sub_pd:
7369 ; SSE-NEXT: subpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5c,0xc1]
7370 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7372 ; AVX1-LABEL: test_mm_sub_pd:
7374 ; AVX1-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5c,0xc1]
7375 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7377 ; AVX512-LABEL: test_mm_sub_pd:
7379 ; AVX512-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5c,0xc1]
7380 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7381 %res = fsub <2 x double> %a0, %a1
7382 ret <2 x double> %res
7385 define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7386 ; SSE-LABEL: test_mm_sub_sd:
7388 ; SSE-NEXT: subsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5c,0xc1]
7389 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7391 ; AVX1-LABEL: test_mm_sub_sd:
7393 ; AVX1-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5c,0xc1]
7394 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7396 ; AVX512-LABEL: test_mm_sub_sd:
7398 ; AVX512-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5c,0xc1]
7399 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7400 %ext0 = extractelement <2 x double> %a0, i32 0
7401 %ext1 = extractelement <2 x double> %a1, i32 0
7402 %fsub = fsub double %ext0, %ext1
7403 %res = insertelement <2 x double> %a0, double %fsub, i32 0
7404 ret <2 x double> %res
7407 define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7408 ; SSE-LABEL: test_mm_subs_epi8:
7410 ; SSE-NEXT: psubsb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe8,0xc1]
7411 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7413 ; AVX1-LABEL: test_mm_subs_epi8:
7415 ; AVX1-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe8,0xc1]
7416 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7418 ; AVX512-LABEL: test_mm_subs_epi8:
7420 ; AVX512-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
7421 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7422 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
7423 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
7424 %res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
7425 %bc = bitcast <16 x i8> %res to <2 x i64>
7428 declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
7430 define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7431 ; SSE-LABEL: test_mm_subs_epi16:
7433 ; SSE-NEXT: psubsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe9,0xc1]
7434 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7436 ; AVX1-LABEL: test_mm_subs_epi16:
7438 ; AVX1-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe9,0xc1]
7439 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7441 ; AVX512-LABEL: test_mm_subs_epi16:
7443 ; AVX512-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
7444 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7445 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
7446 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
7447 %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
7448 %bc = bitcast <8 x i16> %res to <2 x i64>
7451 declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
7453 define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7454 ; SSE-LABEL: test_mm_subs_epu8:
7456 ; SSE-NEXT: psubusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd8,0xc1]
7457 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7459 ; AVX1-LABEL: test_mm_subs_epu8:
7461 ; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd8,0xc1]
7462 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7464 ; AVX512-LABEL: test_mm_subs_epu8:
7466 ; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
7467 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7468 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
7469 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
7470 %res = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
7471 %bc = bitcast <16 x i8> %res to <2 x i64>
7474 declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
7476 define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7477 ; SSE-LABEL: test_mm_subs_epu16:
7479 ; SSE-NEXT: psubusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd9,0xc1]
7480 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7482 ; AVX1-LABEL: test_mm_subs_epu16:
7484 ; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd9,0xc1]
7485 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7487 ; AVX512-LABEL: test_mm_subs_epu16:
7489 ; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
7490 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7491 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
7492 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
7493 %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
7494 %bc = bitcast <8 x i16> %res to <2 x i64>
7497 declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
7499 define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7500 ; SSE-LABEL: test_mm_ucomieq_sd:
7502 ; SSE-NEXT: ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
7503 ; SSE-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
7504 ; SSE-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
7505 ; SSE-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
7506 ; SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7507 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7509 ; AVX1-LABEL: test_mm_ucomieq_sd:
7511 ; AVX1-NEXT: vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
7512 ; AVX1-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
7513 ; AVX1-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
7514 ; AVX1-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
7515 ; AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7516 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7518 ; AVX512-LABEL: test_mm_ucomieq_sd:
7520 ; AVX512-NEXT: vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
7521 ; AVX512-NEXT: setnp %al # encoding: [0x0f,0x9b,0xc0]
7522 ; AVX512-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
7523 ; AVX512-NEXT: andb %al, %cl # encoding: [0x20,0xc1]
7524 ; AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7525 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7526 %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
7529 declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
7531 define i32 @test_mm_ucomige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7532 ; SSE-LABEL: test_mm_ucomige_sd:
7534 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7535 ; SSE-NEXT: ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
7536 ; SSE-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7537 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7539 ; AVX1-LABEL: test_mm_ucomige_sd:
7541 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7542 ; AVX1-NEXT: vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
7543 ; AVX1-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7544 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7546 ; AVX512-LABEL: test_mm_ucomige_sd:
7548 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7549 ; AVX512-NEXT: vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
7550 ; AVX512-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7551 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7552 %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1)
7555 declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readnone
7557 define i32 @test_mm_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7558 ; SSE-LABEL: test_mm_ucomigt_sd:
7560 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7561 ; SSE-NEXT: ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
7562 ; SSE-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7563 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7565 ; AVX1-LABEL: test_mm_ucomigt_sd:
7567 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7568 ; AVX1-NEXT: vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
7569 ; AVX1-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7570 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7572 ; AVX512-LABEL: test_mm_ucomigt_sd:
7574 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7575 ; AVX512-NEXT: vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
7576 ; AVX512-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7577 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7578 %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1)
7581 declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readnone
7583 define i32 @test_mm_ucomile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7584 ; SSE-LABEL: test_mm_ucomile_sd:
7586 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7587 ; SSE-NEXT: ucomisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2e,0xc8]
7588 ; SSE-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7589 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7591 ; AVX1-LABEL: test_mm_ucomile_sd:
7593 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7594 ; AVX1-NEXT: vucomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2e,0xc8]
7595 ; AVX1-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7596 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7598 ; AVX512-LABEL: test_mm_ucomile_sd:
7600 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7601 ; AVX512-NEXT: vucomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
7602 ; AVX512-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
7603 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7604 %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1)
7607 declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readnone
7609 define i32 @test_mm_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7610 ; SSE-LABEL: test_mm_ucomilt_sd:
7612 ; SSE-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7613 ; SSE-NEXT: ucomisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2e,0xc8]
7614 ; SSE-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7615 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7617 ; AVX1-LABEL: test_mm_ucomilt_sd:
7619 ; AVX1-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7620 ; AVX1-NEXT: vucomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2e,0xc8]
7621 ; AVX1-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7622 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7624 ; AVX512-LABEL: test_mm_ucomilt_sd:
7626 ; AVX512-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
7627 ; AVX512-NEXT: vucomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
7628 ; AVX512-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
7629 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7630 %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1)
7633 declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readnone
7635 define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
7636 ; SSE-LABEL: test_mm_ucomineq_sd:
7638 ; SSE-NEXT: ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
7639 ; SSE-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
7640 ; SSE-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
7641 ; SSE-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
7642 ; SSE-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7643 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7645 ; AVX1-LABEL: test_mm_ucomineq_sd:
7647 ; AVX1-NEXT: vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
7648 ; AVX1-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
7649 ; AVX1-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
7650 ; AVX1-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
7651 ; AVX1-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7652 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7654 ; AVX512-LABEL: test_mm_ucomineq_sd:
7656 ; AVX512-NEXT: vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
7657 ; AVX512-NEXT: setp %al # encoding: [0x0f,0x9a,0xc0]
7658 ; AVX512-NEXT: setne %cl # encoding: [0x0f,0x95,0xc1]
7659 ; AVX512-NEXT: orb %al, %cl # encoding: [0x08,0xc1]
7660 ; AVX512-NEXT: movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
7661 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7662 %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1)
7665 declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone
7667 define <2 x double> @test_mm_undefined_pd() {
7668 ; CHECK-LABEL: test_mm_undefined_pd:
7670 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7671 ret <2 x double> undef
7674 define <2 x i64> @test_mm_undefined_si128() {
7675 ; CHECK-LABEL: test_mm_undefined_si128:
7677 ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7681 define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
7682 ; SSE-LABEL: test_mm_unpackhi_epi8:
7684 ; SSE-NEXT: punpckhbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x68,0xc1]
7685 ; SSE-NEXT: # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
7686 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7688 ; AVX1-LABEL: test_mm_unpackhi_epi8:
7690 ; AVX1-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x68,0xc1]
7691 ; AVX1-NEXT: # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
7692 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7694 ; AVX512-LABEL: test_mm_unpackhi_epi8:
7696 ; AVX512-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x68,0xc1]
7697 ; AVX512-NEXT: # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
7698 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7699 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
7700 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
7701 %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
7702 %bc = bitcast <16 x i8> %res to <2 x i64>
7706 define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
7707 ; SSE-LABEL: test_mm_unpackhi_epi16:
7709 ; SSE-NEXT: punpckhwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x69,0xc1]
7710 ; SSE-NEXT: # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7711 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7713 ; AVX1-LABEL: test_mm_unpackhi_epi16:
7715 ; AVX1-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x69,0xc1]
7716 ; AVX1-NEXT: # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7717 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7719 ; AVX512-LABEL: test_mm_unpackhi_epi16:
7721 ; AVX512-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x69,0xc1]
7722 ; AVX512-NEXT: # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7723 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7724 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
7725 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
7726 %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
7727 %bc = bitcast <8 x i16> %res to <2 x i64>
7731 define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
7732 ; SSE-LABEL: test_mm_unpackhi_epi32:
7734 ; SSE-NEXT: unpckhps %xmm1, %xmm0 # encoding: [0x0f,0x15,0xc1]
7735 ; SSE-NEXT: # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7736 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7738 ; AVX1-LABEL: test_mm_unpackhi_epi32:
7740 ; AVX1-NEXT: vunpckhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x15,0xc1]
7741 ; AVX1-NEXT: # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7742 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7744 ; AVX512-LABEL: test_mm_unpackhi_epi32:
7746 ; AVX512-NEXT: vunpckhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x15,0xc1]
7747 ; AVX512-NEXT: # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7748 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7749 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
7750 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
7751 %res = shufflevector <4 x i32> %arg0,<4 x i32> %arg1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
7752 %bc = bitcast <4 x i32> %res to <2 x i64>
7756 define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
7757 ; SSE-LABEL: test_mm_unpackhi_epi64:
7759 ; SSE-NEXT: unpckhpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x15,0xc1]
7760 ; SSE-NEXT: # xmm0 = xmm0[1],xmm1[1]
7761 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7763 ; AVX1-LABEL: test_mm_unpackhi_epi64:
7765 ; AVX1-NEXT: vunpckhpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x15,0xc1]
7766 ; AVX1-NEXT: # xmm0 = xmm0[1],xmm1[1]
7767 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7769 ; AVX512-LABEL: test_mm_unpackhi_epi64:
7771 ; AVX512-NEXT: vunpckhpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x15,0xc1]
7772 ; AVX512-NEXT: # xmm0 = xmm0[1],xmm1[1]
7773 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7774 %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
; _mm_unpackhi_pd: interleave the high doubles of the two operands (mask <1,3>).
; Expects the same unpckhpd/vunpckhpd lowering (and byte encodings) as the
; integer epi64 variant above, since the shuffle is identical modulo element type.
7778 define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
7779 ; SSE-LABEL: test_mm_unpackhi_pd:
7781 ; SSE-NEXT: unpckhpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x15,0xc1]
7782 ; SSE-NEXT: # xmm0 = xmm0[1],xmm1[1]
7783 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7785 ; AVX1-LABEL: test_mm_unpackhi_pd:
7787 ; AVX1-NEXT: vunpckhpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x15,0xc1]
7788 ; AVX1-NEXT: # xmm0 = xmm0[1],xmm1[1]
7789 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7791 ; AVX512-LABEL: test_mm_unpackhi_pd:
7793 ; AVX512-NEXT: vunpckhpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x15,0xc1]
7794 ; AVX512-NEXT: # xmm0 = xmm0[1],xmm1[1]
7795 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7796 %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
7797 ret <2 x double> %res
; _mm_unpacklo_epi8: interleave the low 8 bytes of each operand
; (mask <0,16,1,17,...,7,23>); expected to select punpcklbw directly.
7800 define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
7801 ; SSE-LABEL: test_mm_unpacklo_epi8:
7803 ; SSE-NEXT: punpcklbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x60,0xc1]
7804 ; SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7805 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7807 ; AVX1-LABEL: test_mm_unpacklo_epi8:
7809 ; AVX1-NEXT: vpunpcklbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x60,0xc1]
7810 ; AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7811 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7813 ; AVX512-LABEL: test_mm_unpacklo_epi8:
7815 ; AVX512-NEXT: vpunpcklbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x60,0xc1]
7816 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
7817 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7818 %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
7819 %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
7820 %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
7821 %bc = bitcast <16 x i8> %res to <2 x i64>
; _mm_unpacklo_epi16: interleave the low four 16-bit elements
; (mask <0,8,1,9,2,10,3,11>); expected to select punpcklwd directly.
7825 define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
7826 ; SSE-LABEL: test_mm_unpacklo_epi16:
7828 ; SSE-NEXT: punpcklwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x61,0xc1]
7829 ; SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7830 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7832 ; AVX1-LABEL: test_mm_unpacklo_epi16:
7834 ; AVX1-NEXT: vpunpcklwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x61,0xc1]
7835 ; AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7836 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7838 ; AVX512-LABEL: test_mm_unpacklo_epi16:
7840 ; AVX512-NEXT: vpunpcklwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x61,0xc1]
7841 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
7842 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7843 %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
7844 %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
7845 %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
7846 %bc = bitcast <8 x i16> %res to <2 x i64>
; _mm_unpacklo_epi32: interleave the low 32-bit elements (mask <0,4,1,5>).
; Lowered to the float-domain unpcklps/vunpcklps (0x0F 0x14) rather than
; punpckldq, mirroring the unpackhi_epi32 case above.
7850 define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
7851 ; SSE-LABEL: test_mm_unpacklo_epi32:
7853 ; SSE-NEXT: unpcklps %xmm1, %xmm0 # encoding: [0x0f,0x14,0xc1]
7854 ; SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
7855 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7857 ; AVX1-LABEL: test_mm_unpacklo_epi32:
7859 ; AVX1-NEXT: vunpcklps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x14,0xc1]
7860 ; AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
7861 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7863 ; AVX512-LABEL: test_mm_unpacklo_epi32:
7865 ; AVX512-NEXT: vunpcklps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x14,0xc1]
7866 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
7867 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7868 %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
7869 %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
7870 %res = shufflevector <4 x i32> %arg0,<4 x i32> %arg1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
7871 %bc = bitcast <4 x i32> %res to <2 x i64>
; _mm_unpacklo_epi64: concatenate the low 64-bit halves (mask <0,2>).
; Lowered to movlhps/vmovlhps (0x0F 0x16) instead of punpcklqdq.
7875 define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
7876 ; SSE-LABEL: test_mm_unpacklo_epi64:
7878 ; SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
7879 ; SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
7880 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7882 ; AVX1-LABEL: test_mm_unpacklo_epi64:
7884 ; AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
7885 ; AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
7886 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7888 ; AVX512-LABEL: test_mm_unpacklo_epi64:
7890 ; AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
7891 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
7892 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7893 %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
; _mm_unpacklo_pd: concatenate the low doubles of the two operands (mask <0,2>).
; Expects the same movlhps/vmovlhps lowering (and byte encodings) as the
; integer epi64 variant above, since the shuffle is identical modulo element type.
7897 define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
7898 ; SSE-LABEL: test_mm_unpacklo_pd:
7900 ; SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
7901 ; SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
7902 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7904 ; AVX1-LABEL: test_mm_unpacklo_pd:
7906 ; AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
7907 ; AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
7908 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7910 ; AVX512-LABEL: test_mm_unpacklo_pd:
7912 ; AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
7913 ; AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
7914 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7915 %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
7916 ret <2 x double> %res
; _mm_xor_pd: bitwise XOR of double vectors, expressed via <4 x i32> bitcasts
; as clang emits it. Expected to lower to the float-domain xorps/vxorps
; (0x0F 0x57), not the integer pxor.
7919 define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
7920 ; SSE-LABEL: test_mm_xor_pd:
7922 ; SSE-NEXT: xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
7923 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7925 ; AVX1-LABEL: test_mm_xor_pd:
7927 ; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
7928 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7930 ; AVX512-LABEL: test_mm_xor_pd:
7932 ; AVX512-NEXT: vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
7933 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7934 %arg0 = bitcast <2 x double> %a0 to <4 x i32>
7935 %arg1 = bitcast <2 x double> %a1 to <4 x i32>
7936 %res = xor <4 x i32> %arg0, %arg1
7937 %bc = bitcast <4 x i32> %res to <2 x double>
7938 ret <2 x double> %bc
7941 define <2 x i64> @test_mm_xor_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
7942 ; SSE-LABEL: test_mm_xor_si128:
7944 ; SSE-NEXT: xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
7945 ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7947 ; AVX1-LABEL: test_mm_xor_si128:
7949 ; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
7950 ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7952 ; AVX512-LABEL: test_mm_xor_si128:
7954 ; AVX512-NEXT: vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
7955 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
7956 %res = xor <2 x i64> %a0, %a1