; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c -show-mc-encoding -disable-peephole | FileCheck %s --check-prefixes=AVX,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c -show-mc-encoding -disable-peephole | FileCheck %s --check-prefixes=AVX,X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl -show-mc-encoding -disable-peephole | FileCheck %s --check-prefixes=AVX512VL,X86-AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -show-mc-encoding -disable-peephole | FileCheck %s --check-prefixes=AVX512VL,X64-AVX512VL
; Round <4 x float> to <8 x i16> half (upper 4 lanes zero) with rounding mode 0.
define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
; AVX-LABEL: test_x86_vcvtps2ph_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_vcvtps2ph_128:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
; Round <8 x float> to <8 x i16> half with rounding mode 0; ymm use requires vzeroupper.
define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
; AVX-LABEL: test_x86_vcvtps2ph_256:
; AVX:       # %bb.0:
; AVX-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_vcvtps2ph_256:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
; 256-bit convert + full 128-bit store should fold into a single
; memory-destination vcvtps2ph.
define void @test_x86_vcvtps2ph_256_m(ptr nocapture %d, <8 x float> %a) nounwind {
; X86-LABEL: test_x86_vcvtps2ph_256_m:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    vcvtps2ph $3, %ymm0, (%eax) # encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_256_m:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vcvtps2ph $3, %ymm0, (%rdi) # encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
; X86-AVX512VL:       # %bb.0: # %entry
; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT:    vcvtps2ph $3, %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
; X86-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2ph $3, %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
  store <8 x i16> %0, ptr %d, align 16
  ret void
}
; Convert + extract of the low 4 lanes + 64-bit store should fold into a
; single memory-destination vcvtps2ph (the shuffle is free).
define void @test_x86_vcvtps2ph_128_m(ptr nocapture %d, <4 x float> %a) nounwind {
; X86-LABEL: test_x86_vcvtps2ph_128_m:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_m:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
; X86-AVX512VL:       # %bb.0: # %entry
; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
  %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  store <4 x i16> %1, ptr %d, align 8
  ret void
}
; Convert + bitcast to <2 x double> + extract element 0 + store should still
; fold into a memory-destination vcvtps2ph of the low 64 bits.
define void @test_x86_vcvtps2ph_128_m2(ptr nocapture %hf4x16, <4 x float> %f4X86) #0 {
; X86-LABEL: test_x86_vcvtps2ph_128_m2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_m2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
; X86-AVX512VL:       # %bb.0: # %entry
; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4X86, i32 3)
  %1 = bitcast <8 x i16> %0 to <2 x double>
  %vecext = extractelement <2 x double> %1, i32 0
  store double %vecext, ptr %hf4x16, align 8
  ret void
}
131 define void @test_x86_vcvtps2ph_128_m3(ptr nocapture %hf4x16, <4 x float> %f4X86) #0 {
132 ; X86-LABEL: test_x86_vcvtps2ph_128_m3:
133 ; X86: # %bb.0: # %entry
134 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
135 ; X86-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
136 ; X86-NEXT: retl # encoding: [0xc3]
138 ; X64-LABEL: test_x86_vcvtps2ph_128_m3:
139 ; X64: # %bb.0: # %entry
140 ; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
141 ; X64-NEXT: retq # encoding: [0xc3]
143 ; X86-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
144 ; X86-AVX512VL: # %bb.0: # %entry
145 ; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
146 ; X86-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
147 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
149 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
150 ; X64-AVX512VL: # %bb.0: # %entry
151 ; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
152 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
154 %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4X86, i32 3)
155 %1 = bitcast <8 x i16> %0 to <2 x i64>
156 %vecext = extractelement <2 x i64> %1, i32 0
157 store i64 %vecext, ptr %hf4x16, align 8