1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c -show-mc-encoding -disable-peephole | FileCheck %s --check-prefix=X32
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c -show-mc-encoding -disable-peephole | FileCheck %s --check-prefix=X64
4 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl -show-mc-encoding -disable-peephole | FileCheck %s --check-prefix=X32-AVX512VL
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -show-mc-encoding -disable-peephole | FileCheck %s --check-prefix=X64-AVX512VL
; Register form: the @llvm.x86.vcvtph2ps.128 intrinsic (half -> float on the
; low 4 lanes) should select a single vcvtph2ps %xmm0, %xmm0 on all four RUN
; configurations; on AVX512VL the EVEX form is compressed back to VEX.
7 define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
8 ; X32-LABEL: test_x86_vcvtph2ps_128:
10 ; X32-NEXT: vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
11 ; X32-NEXT: retl # encoding: [0xc3]
13 ; X64-LABEL: test_x86_vcvtph2ps_128:
15 ; X64-NEXT: vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
16 ; X64-NEXT: retq # encoding: [0xc3]
18 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
19 ; X32-AVX512VL: # %bb.0:
20 ; X32-AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
21 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
23 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
24 ; X64-AVX512VL: # %bb.0:
25 ; X64-AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
26 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
27 %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
30 declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
; Memory form: the <8 x i16> load feeding vcvtph2ps.128 should be folded into
; the instruction's memory operand ((%eax) / (%rdi)) rather than loaded into a
; register first; note -disable-peephole is on in the RUN lines, so this
; folding comes from ISel itself.
32 define <4 x float> @test_x86_vcvtph2ps_128_m(<8 x i16>* nocapture %a) {
33 ; X32-LABEL: test_x86_vcvtph2ps_128_m:
35 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
36 ; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
37 ; X32-NEXT: retl # encoding: [0xc3]
39 ; X64-LABEL: test_x86_vcvtph2ps_128_m:
41 ; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
42 ; X64-NEXT: retq # encoding: [0xc3]
44 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
45 ; X32-AVX512VL: # %bb.0:
46 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
47 ; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
48 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
50 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
51 ; X64-AVX512VL: # %bb.0:
52 ; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
53 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
54 %load = load <8 x i16>, <8 x i16>* %a
55 %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %load) ; <<4 x float>> [#uses=1]
; 256-bit register form: vcvtph2ps.256 converts 8 half values in %xmm0 into
; 8 floats in %ymm0 with a single instruction on every configuration.
59 define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
60 ; X32-LABEL: test_x86_vcvtph2ps_256:
62 ; X32-NEXT: vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
63 ; X32-NEXT: retl # encoding: [0xc3]
65 ; X64-LABEL: test_x86_vcvtph2ps_256:
67 ; X64-NEXT: vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
68 ; X64-NEXT: retq # encoding: [0xc3]
70 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
71 ; X32-AVX512VL: # %bb.0:
72 ; X32-AVX512VL-NEXT: vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
73 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
75 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
76 ; X64-AVX512VL: # %bb.0:
77 ; X64-AVX512VL-NEXT: vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
78 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
79 %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
82 declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
</declare>
; 256-bit memory form: the <8 x i16> load should be folded into the
; vcvtph2ps memory operand, producing a %ymm0 result.
84 define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
85 ; X32-LABEL: test_x86_vcvtph2ps_256_m:
87 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
88 ; X32-NEXT: vcvtph2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x00]
89 ; X32-NEXT: retl # encoding: [0xc3]
91 ; X64-LABEL: test_x86_vcvtph2ps_256_m:
93 ; X64-NEXT: vcvtph2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x07]
94 ; X64-NEXT: retq # encoding: [0xc3]
96 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
97 ; X32-AVX512VL: # %bb.0:
98 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
99 ; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x00]
100 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
102 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
103 ; X64-AVX512VL: # %bb.0:
104 ; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x07]
105 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
106 %load = load <8 x i16>, <8 x i16>* %a
107 %res = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %load)
; Reverse direction, register form: vcvtps2ph.128 with rounding-control
; immediate 0 (round-to-nearest-even) becomes vcvtps2ph $0, %xmm0, %xmm0.
111 define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
112 ; X32-LABEL: test_x86_vcvtps2ph_128:
114 ; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
115 ; X32-NEXT: retl # encoding: [0xc3]
117 ; X64-LABEL: test_x86_vcvtps2ph_128:
119 ; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
120 ; X64-NEXT: retq # encoding: [0xc3]
122 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
123 ; X32-AVX512VL: # %bb.0:
124 ; X32-AVX512VL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
125 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
127 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
128 ; X64-AVX512VL: # %bb.0:
129 ; X64-AVX512VL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
130 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
131 %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
134 declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
; 256-bit source, register form: %ymm0 is narrowed to 8 halves in %xmm0.
; Because a %ymm register was live, a vzeroupper is expected before returning
; to (potentially SSE) callers.
136 define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
137 ; X32-LABEL: test_x86_vcvtps2ph_256:
139 ; X32-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
140 ; X32-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
141 ; X32-NEXT: retl # encoding: [0xc3]
143 ; X64-LABEL: test_x86_vcvtps2ph_256:
145 ; X64-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
146 ; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
147 ; X64-NEXT: retq # encoding: [0xc3]
149 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
150 ; X32-AVX512VL: # %bb.0:
151 ; X32-AVX512VL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
152 ; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
153 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
155 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
156 ; X64-AVX512VL: # %bb.0:
157 ; X64-AVX512VL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
158 ; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
159 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
160 %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
163 declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
; Scalar-load pattern: an i64 load inserted into lane 0 of a <2 x i64> (with
; lane 1 explicitly zeroed) and bitcast to <8 x i16> should still fold into
; vcvtph2ps's memory operand — the insert/bitcast scaffolding generates no code.
165 define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
166 ; X32-LABEL: test_x86_vcvtps2ph_128_scalar:
168 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
169 ; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
170 ; X32-NEXT: retl # encoding: [0xc3]
172 ; X64-LABEL: test_x86_vcvtps2ph_128_scalar:
174 ; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
175 ; X64-NEXT: retq # encoding: [0xc3]
177 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
178 ; X32-AVX512VL: # %bb.0:
179 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
180 ; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
181 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
183 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
184 ; X64-AVX512VL: # %bb.0:
185 ; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
186 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
187 %load = load i64, i64* %ptr
188 %ins1 = insertelement <2 x i64> undef, i64 %load, i32 0
189 %ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
190 %bc = bitcast <2 x i64> %ins2 to <8 x i16>
191 %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2
; Same scalar-load pattern as above, but without zeroing lane 1 (upper lane
; left undef); the load must still fold into vcvtph2ps's memory operand.
195 define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
196 ; X32-LABEL: test_x86_vcvtps2ph_128_scalar2:
198 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
199 ; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
200 ; X32-NEXT: retl # encoding: [0xc3]
202 ; X64-LABEL: test_x86_vcvtps2ph_128_scalar2:
204 ; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
205 ; X64-NEXT: retq # encoding: [0xc3]
207 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
208 ; X32-AVX512VL: # %bb.0:
209 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
210 ; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
211 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
213 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
214 ; X64-AVX512VL: # %bb.0:
215 ; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
216 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
217 %load = load i64, i64* %ptr
218 %ins = insertelement <2 x i64> undef, i64 %load, i32 0
219 %bc = bitcast <2 x i64> %ins to <8 x i16>
220 %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc)
; Store form, 256-bit: a vcvtps2ph.256 result stored straight to memory should
; use the memory-destination encoding (vcvtps2ph $3, %ymm0, (mem)) instead of a
; register convert followed by a separate store. Immediate 3 = truncate
; rounding control.
224 define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
225 ; X32-LABEL: test_x86_vcvtps2ph_256_m:
226 ; X32: # %bb.0: # %entry
227 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
228 ; X32-NEXT: vcvtps2ph $3, %ymm0, (%eax) # encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
229 ; X32-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
230 ; X32-NEXT: retl # encoding: [0xc3]
232 ; X64-LABEL: test_x86_vcvtps2ph_256_m:
233 ; X64: # %bb.0: # %entry
234 ; X64-NEXT: vcvtps2ph $3, %ymm0, (%rdi) # encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
235 ; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
236 ; X64-NEXT: retq # encoding: [0xc3]
238 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
239 ; X32-AVX512VL: # %bb.0: # %entry
240 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
241 ; X32-AVX512VL-NEXT: vcvtps2ph $3, %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
242 ; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
243 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
245 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
246 ; X64-AVX512VL: # %bb.0: # %entry
247 ; X64-AVX512VL-NEXT: vcvtps2ph $3, %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
248 ; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
249 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
251 %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
252 store <8 x i16> %0, <8 x i16>* %d, align 16
; Store form, 128-bit: vcvtps2ph.128 followed by a shuffle extracting the low
; 4 halves and a <4 x i16> store should collapse to one memory-destination
; vcvtps2ph (the instruction only writes 64 bits for a 128-bit source anyway).
256 define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind {
257 ; X32-LABEL: test_x86_vcvtps2ph_128_m:
258 ; X32: # %bb.0: # %entry
259 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
260 ; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
261 ; X32-NEXT: retl # encoding: [0xc3]
263 ; X64-LABEL: test_x86_vcvtps2ph_128_m:
264 ; X64: # %bb.0: # %entry
265 ; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
266 ; X64-NEXT: retq # encoding: [0xc3]
268 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
269 ; X32-AVX512VL: # %bb.0: # %entry
270 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
271 ; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
272 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
274 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
275 ; X64-AVX512VL: # %bb.0: # %entry
276 ; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
277 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
279 %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
280 %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
281 store <4 x i16> %1, <4 x i16>* %d, align 8
; Store-form variant: the convert result is bitcast to <2 x double> and only
; element 0 (the low 64 bits) is stored; this should still fold into a single
; memory-destination vcvtps2ph.
285 define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 {
286 ; X32-LABEL: test_x86_vcvtps2ph_128_m2:
287 ; X32: # %bb.0: # %entry
288 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
289 ; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
290 ; X32-NEXT: retl # encoding: [0xc3]
292 ; X64-LABEL: test_x86_vcvtps2ph_128_m2:
293 ; X64: # %bb.0: # %entry
294 ; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
295 ; X64-NEXT: retq # encoding: [0xc3]
297 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
298 ; X32-AVX512VL: # %bb.0: # %entry
299 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
300 ; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
301 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
303 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
304 ; X64-AVX512VL: # %bb.0: # %entry
305 ; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
306 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
308 %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
309 %1 = bitcast <8 x i16> %0 to <2 x double>
310 %vecext = extractelement <2 x double> %1, i32 0
311 store double %vecext, double* %hf4x16, align 8
315 define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 {
316 ; X32-LABEL: test_x86_vcvtps2ph_128_m3:
317 ; X32: # %bb.0: # %entry
318 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
319 ; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
320 ; X32-NEXT: retl # encoding: [0xc3]
322 ; X64-LABEL: test_x86_vcvtps2ph_128_m3:
323 ; X64: # %bb.0: # %entry
324 ; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
325 ; X64-NEXT: retq # encoding: [0xc3]
327 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
328 ; X32-AVX512VL: # %bb.0: # %entry
329 ; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
330 ; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
331 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
333 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
334 ; X64-AVX512VL: # %bb.0: # %entry
335 ; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
336 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
338 %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
339 %1 = bitcast <8 x i16> %0 to <2 x i64>
340 %vecext = extractelement <2 x i64> %1, i32 0
341 store i64 %vecext, i64* %hf4x16, align 8