; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X86-SSE
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX1,X86-AVX1
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,X86-AVX512
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X64-SSE
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX1,X64-AVX1
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,X64-AVX512

define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; SSE-LABEL: test_x86_sse41_blendvpd:
; SSE:       ## %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT:    blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
; SSE-NEXT:    movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_blendvpd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
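
; Note: the non-VEX blendv instructions use XMM0 as an implicit mask operand,
; which is why the SSE sequences in this file shuffle the mask into %xmm0
; before (p)blendv*; the VEX forms take the mask as an explicit fourth operand.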

define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE-LABEL: test_x86_sse41_blendvps:
; SSE:       ## %bb.0:
; SSE-NEXT:    movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
; SSE-NEXT:    movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_blendvps:
; AVX:       ## %bb.0:
; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone

define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse41_dppd:
; SSE:       ## %bb.0:
; SSE-NEXT:    dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_dppd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone

define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse41_dpps:
; SSE:       ## %bb.0:
; SSE-NEXT:    dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_dpps:
; AVX:       ## %bb.0:
; AVX-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
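
; For the dppd/dpps tests above, the immediate's upper bits mask which element
; products enter the dot-product sum and its lower bits mask which result
; elements receive it (imm[5:4]/imm[1:0] for dppd, imm[7:4]/imm[3:0] for dpps).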

define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse41_insertps:
; SSE:       ## %bb.0:
; SSE-NEXT:    insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
; SSE-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_insertps:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; AVX1-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_insertps:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; AVX512-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
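
; The $17 (0x11) immediate above decodes as: source element imm[7:6] = 0,
; destination slot imm[5:4] = 1, zero mask imm[3:0] = 0b0001, matching the
; "xmm0 = zero,xmm1[0],xmm0[2,3]" shuffle comment in the checks.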

define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse41_mpsadbw:
; SSE:       ## %bb.0:
; SSE-NEXT:    mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_mpsadbw:
; AVX:       ## %bb.0:
; AVX-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone

; We shouldn't commute this operation to fold the load.
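; mpsadbw is not commutative: the immediate picks a fixed 4-byte block from the
; second source but only a sliding-window base in the first. As a sketch of the
; SDM definition with imm = 7 (imm[2] = 1, imm[1:0] = 3), for each word i = 0..7:
;   dst[i] = sum over k = 0..3 of |src1.byte[4+i+k] - src2.byte[12+k]|
; Swapping src1 and src2 changes the result, so the loaded %a0 cannot be moved
; into the instruction's memory-operand slot.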
define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) {
; X86-SSE-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movdqa (%eax), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT:    mpsadbw $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x42,0xc8,0x07]
; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX1-NEXT:    vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX512-NEXT:    vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT:    mpsadbw $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x42,0xc8,0x07]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vmovdqa (%rdi), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX1-NEXT:    vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse41_mpsadbw_load_op0:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX512-NEXT:    vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
  %a0 = load <16 x i8>, ptr %ptr
  %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}

define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse41_packusdw:
; SSE:       ## %bb.0:
; SSE-NEXT:    packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_packusdw:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_packusdw:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
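
; packusdw packs signed dwords to words with unsigned saturation (clamping each
; element to [0, 65535]), so the fold test below should constant-fold entirely:
; <0,0,0,0> and <65535, 65536, -1, -131072> pack to the single constant
; [0,0,0,0,65535,65535,0,0] that the checks expect to be loaded.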

define <8 x i16> @test_x86_sse41_packusdw_fold() {
; X86-SSE-LABEL: test_x86_sse41_packusdw_fold:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X86-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; X86-SSE-NEXT:    ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse41_packusdw_fold:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X86-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse41_packusdw_fold:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X86-AVX512-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse41_packusdw_fold:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X64-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; X64-SSE-NEXT:    ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse41_packusdw_fold:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X64-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse41_packusdw_fold:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X64-AVX512-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
  ret <8 x i16> %res
}

define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; SSE-LABEL: test_x86_sse41_pblendvb:
; SSE:       ## %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
; SSE-NEXT:    movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pblendvb:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone

define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) {
; SSE-LABEL: test_x86_sse41_phminposuw:
; SSE:       ## %bb.0:
; SSE-NEXT:    phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_phminposuw:
; AVX:       ## %bb.0:
; AVX-NEXT:    vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
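
; phminposuw writes the horizontal minimum of the eight unsigned input words to
; word 0 of the result and the index of that minimum to word 1, zeroing the
; remaining words.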

define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestc:
; SSE:       ## %bb.0:
; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestc:
; AVX:       ## %bb.0:
; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
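
; ptest sets ZF when the AND of its two sources is all zeroes and CF when the
; AND-NOT is all zeroes, so each ptest intrinsic in this group reduces to
; materializing a single flag: setb (CF) for ptestc, seta (CF = 0 and ZF = 0)
; for ptestnzc, and sete (ZF) for ptestz.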

define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestnzc:
; SSE:       ## %bb.0:
; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestnzc:
; AVX:       ## %bb.0:
; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone

define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestz:
; SSE:       ## %bb.0:
; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestz:
; AVX:       ## %bb.0:
; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
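
; In the round tests below, the $7 immediate sets imm[2] (rounding select), so
; the rounding mode comes from MXCSR.RC and imm[1:0] is ignored; imm[3] = 0
; leaves precision exceptions unsuppressed (per the roundp*/rounds* immediate
; encoding).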

define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse41_round_pd:
; SSE:       ## %bb.0:
; SSE-NEXT:    roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_pd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_pd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vroundpd $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone

define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse41_round_ps:
; SSE:       ## %bb.0:
; SSE-NEXT:    roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_ps:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_ps:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vroundps $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone

define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse41_round_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone

define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) {
; X86-SSE-LABEL: test_x86_sse41_round_sd_load:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse41_round_sd_load:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse41_round_sd_load:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse41_round_sd_load:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    roundsd $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x07,0x07]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse41_round_sd_load:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse41_round_sd_load:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
  %a1b = load <2 x double>, ptr %a1
  %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1b, i32 7) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}

define <4 x float> @test_x86_sse41_round_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse41_round_ss:
; SSE:       ## %bb.0:
; SSE-NEXT:    roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_ss:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_ss:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone