; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X86 --check-prefix=X86-AVX
; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X86 --check-prefix=X86-AVX512VL
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X64 --check-prefix=X64-AVX
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
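;
; The four RUN lines cover the 2x2 matrix of ISA and target: AVX2 vs. AVX512VL
; code generation (the latter checked for EVEX-to-VEX compression of the
; encodings) and 32-bit vs. 64-bit ABIs (retl vs. retq, stack vs. register
; arguments, absolute vs. RIP-relative constant-pool addressing).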
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packssdw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packssdw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
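
; The *_fold tests below call the pack intrinsics on constant operands, so the
; pack is constant-folded at compile time and only a constant-pool load
; remains. For packssdw each i32 is clamped to the signed i16 range
; [-32768,32767]; printed as unsigned i16 that gives, e.g., 65535 -> 32767,
; -32767 -> 32769, -65535 -> 32768 and -256 -> 65280, which is exactly the
; vector checked in the vmovaps below.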
define <16 x i16> @test_x86_avx2_packssdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packssdw_fold:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI1_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
  ret <16 x i16> %res
}

define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packsswb:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packsswb:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_packsswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packsswb_fold:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI3_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
  ret <32 x i8> %res
}

define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packuswb:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packuswb:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_packuswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packuswb_fold:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI5_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
  ret <32 x i8> %res
}

define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_wd:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmaxs_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxs_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_pmaxu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxu_b:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmins_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmins_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmins_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_pminu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminu_b:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminu_b:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
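
; pmovmskb returns a scalar, so codegen inserts vzeroupper before the return
; to clear the upper YMM state and avoid AVX/SSE transition penalties in the
; caller; the checks below verify that.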
define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovmskb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ; <i32> [#uses=1]
  ret i32 %res
}
declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulh_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulh_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulhu_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psad_bw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psad_bw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone

define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone

define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_q:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_q:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone

define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone

define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone

define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_q:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_q:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone

define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone

define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone

define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone

define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone

define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone

define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_q:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_q:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone

define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) {
; X86-AVX-LABEL: test_x86_avx2_psrl_w_load:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vpsrlw (%eax), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x00]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vpsrlw (%eax), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x00]
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrl_w_load:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x07]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x07]
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %a1 = load <8 x i16>, <8 x i16>* %p
  %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}

define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone

define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_q:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_q:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone

define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_w:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_w:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone

define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_sw:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_w:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone

define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_sw:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_w:
; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_ub_sw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone

; Make sure we don't commute this operation.
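; (vpmaddubsw treats its first source as unsigned bytes and its second as
; signed bytes, so the operands cannot be swapped to fold the load of op0
; into the instruction's memory operand; the checks below expect op0 to be
; loaded into a register instead.)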
define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) {
; X86-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vmovdqa (%eax), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vmovdqa (%eax), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %a0 = load <32 x i8>, <32 x i8>* %ptr
  %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}

define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmul_hr_sw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pshuf_b:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pshuf_b:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone

define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_b:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone

define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_w:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone

define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
; CHECK: ## %bb.0:
; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone

define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packusdw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packusdw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_packusdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packusdw_fold:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI47_0, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI47_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI47_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI47_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI47_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
  ret <16 x i16> %res
}

define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: test_x86_avx2_pblendvb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone

define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i8 7) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone

define <32 x i8> @test_x86_avx2_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsb:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsb:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone

define <8 x i32> @test_x86_avx2_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsd:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsd:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone

define <8 x i32> @test_x86_avx2_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxud:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxud:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxuw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxuw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readnone

define <32 x i8> @test_x86_avx2_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsb:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsb:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone

define <8 x i32> @test_x86_avx2_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsd:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsd:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone

define <8 x i32> @test_x86_avx2_pminud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminud:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminud:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone

define <16 x i16> @test_x86_avx2_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pminuw:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminuw:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
; CHECK-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i8 7) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind readnone

define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i8 7) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind readnone

; Check that the arguments are swapped between the intrinsic definition
; and its lowering. Indeed, the offsets are the first source in
; the instruction.
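; In the checks below the index register %ymm1 is encoded in VEX.vvvv (the
; instruction's first source) while the data register %ymm0 is the ModRM
; operand, the reverse of the intrinsic's (data, indices) argument order.
; The corresponding C intrinsic (assuming the usual immintrin.h mapping) is
; _mm256_permutevar8x32_epi32.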
define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permd:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permd:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly

; Check that the arguments are swapped between the intrinsic definition
; and its lowering. Indeed, the offsets are the first source in
; the instruction.
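; The same swap applies here; the packed-float variant corresponds to the C
; intrinsic _mm256_permutevar8x32_ps (assuming the usual immintrin.h mapping).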
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permps:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permps:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly
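
; The maskload/maskstore tests below rely on vpmaskmov semantics: the high
; (sign) bit of each mask element selects the lane, masked-off load lanes
; read as zero, and masked-off store lanes leave memory untouched.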
define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x00]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x07]
; X64-NEXT: retq ## encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly

define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x00]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q_256:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x07]
; X64-NEXT: retq ## encoding: [0xc3]
  %res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonly

define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x00]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x07]
; X64-NEXT: retq ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly

define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x00]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d_256:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x07]
; X64-NEXT: retq ## encoding: [0xc3]
  %res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonly

define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x08]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x0f]
; X64-NEXT: retq ## encoding: [0xc3]
  call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
  ret void
}
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind

define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q_256:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq ## encoding: [0xc3]
  call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2)
  ret void
}
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind

define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x8e,0x08]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0x79,0x8e,0x0f]
; X64-NEXT: retq ## encoding: [0xc3]
  call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
  ret void
}
declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind

define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d_256:
; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq ## encoding: [0xc3]
  call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2)
  ret void
}
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
1169 define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) {
1170 ; AVX2-LABEL: test_x86_avx2_psllv_d:
1172 ; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1]
1173 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1175 ; AVX512VL-LABEL: test_x86_avx2_psllv_d:
1176 ; AVX512VL: ## %bb.0:
1177 ; AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1]
1178 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1179 %res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
1182 declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
1185 define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
1186 ; AVX2-LABEL: test_x86_avx2_psllv_d_256:
1188 ; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
1189 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1191 ; AVX512VL-LABEL: test_x86_avx2_psllv_d_256:
1192 ; AVX512VL: ## %bb.0:
1193 ; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
1194 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1195 %res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
1198 declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
1201 define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) {
1202 ; AVX2-LABEL: test_x86_avx2_psllv_q:
1204 ; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
1205 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1207 ; AVX512VL-LABEL: test_x86_avx2_psllv_q:
1208 ; AVX512VL: ## %bb.0:
1209 ; AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
1210 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1211 %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
1214 declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
1217 define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
1218 ; AVX2-LABEL: test_x86_avx2_psllv_q_256:
1220 ; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
1221 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1223 ; AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
1224 ; AVX512VL: ## %bb.0:
1225 ; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
1226 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1227 %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
1230 declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
1233 define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) {
1234 ; AVX2-LABEL: test_x86_avx2_psrlv_d:
1236 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
1237 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1239 ; AVX512VL-LABEL: test_x86_avx2_psrlv_d:
1240 ; AVX512VL: ## %bb.0:
1241 ; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
1242 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1243 %res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
1246 declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
1249 define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
1250 ; AVX2-LABEL: test_x86_avx2_psrlv_d_256:
1252 ; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
1253 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1255 ; AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
1256 ; AVX512VL: ## %bb.0:
1257 ; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
1258 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1259 %res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
1262 declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
1265 define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) {
1266 ; AVX2-LABEL: test_x86_avx2_psrlv_q:
1268 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
1269 ; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1271 ; AVX512VL-LABEL: test_x86_avx2_psrlv_q:
1272 ; AVX512VL: ## %bb.0:
1273 ; AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
1274 ; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
1275 %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
1278 declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_q_256:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone

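; VPSRAVD performs a per-lane arithmetic right shift; out-of-range counts
; (>= 32) fill the lane with copies of the sign bit rather than zeroing it.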
define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}

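; With constant operands, both vectors are materialized from the constant
; pool and the shift still executes at run time (the RUN lines pass
; -disable-peephole). The counts 35 and 52 are >= 32, exercising the
; sign-fill behavior.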
define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_const:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X86-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI79_0, kind: FK_Data_4
; X86-AVX-NEXT: vpsravd LCPI79_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI79_1, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI79_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI79_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsravd LCPI79_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI79_1, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X64-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI79_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI79_1-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI79_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI79_1-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone

define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d_256:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
ret <8 x i32> %res
}

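; 256-bit variant of the constant-operand test above; the count vector again
; includes values >= 32 (35, 52, 69, 32) to hit the sign-fill path.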
define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI81_0, kind: FK_Data_4
; X86-AVX-NEXT: vpsravd LCPI81_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI81_1, kind: FK_Data_4
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI81_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI81_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsravd LCPI81_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI81_1, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI81_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI81_1-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI81_0-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI81_1-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone

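; The gather tests below pass a base pointer, a vector of indices, a merge
; mask, and a scale of 2 (the trailing i8 argument). On X86 the pointer is
; loaded from the stack into %eax first; on X64 it arrives in %rdi. The
; gather instruction clobbers its mask operand, zeroing it on completion.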
define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0,
i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
<4 x i32>, <2 x double>, i8) nounwind readonly

define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0,
i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 2) ;
ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
<4 x i32>, <4 x double>, i8) nounwind readonly

define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd:
; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0,
i8* %a1, <2 x i64> %idx, <2 x double> %mask, i8 2) ;
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*,
<2 x i64>, <2 x double>, i8) nounwind readonly

define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd_256:
; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0,
i8* %a1, <4 x i64> %idx, <4 x double> %mask, i8 2) ;
ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*,
<4 x i64>, <4 x double>, i8) nounwind readonly

define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
; X64: ## %bb.0:
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0,
i8* %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
<4 x i32>, <4 x float>, i8) nounwind readonly

define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
; X64: ## %bb.0:
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 2) ;
ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
<8 x i32>, <8 x float>, i8) nounwind readonly

define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps:
; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0,
i8* %a1, <2 x i64> %idx, <4 x float> %mask, i8 2) ;
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*,
<2 x i64>, <4 x float>, i8) nounwind readonly

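; For the 256-bit qps/qd forms the result is only 128 bits wide, so llc also
; emits vzeroupper before returning to avoid AVX/SSE transition penalties.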
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps_256:
; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0,
i8* %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*,
<4 x i64>, <4 x float>, i8) nounwind readonly

define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q:
; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0,
i8* %a1, <4 x i32> %idx, <2 x i64> %mask, i8 2) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*,
<4 x i32>, <2 x i64>, i8) nounwind readonly

define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q_256:
; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0,
i8* %a1, <4 x i32> %idx, <4 x i64> %mask, i8 2) ;
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*,
<4 x i32>, <4 x i64>, i8) nounwind readonly

define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q:
; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
i8* %a1, <2 x i64> %idx, <2 x i64> %mask, i8 2) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*,
<2 x i64>, <2 x i64>, i8) nounwind readonly

define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q_256:
; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0,
i8* %a1, <4 x i64> %idx, <4 x i64> %mask, i8 2) ;
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*,
<4 x i64>, <4 x i64>, i8) nounwind readonly

define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d:
; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0,
i8* %a1, <4 x i32> %idx, <4 x i32> %mask, i8 2) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*,
<4 x i32>, <4 x i32>, i8) nounwind readonly

define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d_256:
; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0,
i8* %a1, <8 x i32> %idx, <8 x i32> %mask, i8 2) ;
ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*,
<8 x i32>, <8 x i32>, i8) nounwind readonly

define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d:
; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0,
i8* %a1, <2 x i64> %idx, <4 x i32> %mask, i8 2) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*,
<2 x i64>, <4 x i32>, i8) nounwind readonly

define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d_256:
; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: retl ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d_256:
; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0,
i8* %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*,
<4 x i64>, <4 x i32>, i8) nounwind readonly

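; Because the gather zeroes its mask register, the compiler copies %mask
; (ymm2) into a scratch register (ymm3) for the gather, so the original
; value can still be stored to %out afterwards.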
define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx, <8 x float> %mask, float* nocapture %out) {
; X86-AVX-LABEL: test_gather_mask:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
; X86-AVX-NEXT: vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
; X86-AVX-NEXT: vmovups %ymm2, (%eax) ## encoding: [0xc5,0xfc,0x11,0x10]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_gather_mask:
; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
; X86-AVX512VL-NEXT: vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
; X86-AVX512VL-NEXT: vmovups %ymm2, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x10]
; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_gather_mask:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX-NEXT: vmovups %ymm2, (%rsi) ## encoding: [0xc5,0xfc,0x11,0x16]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_gather_mask:
; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX512VL-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX512VL-NEXT: vmovups %ymm2, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
%a_i8 = bitcast float* %a to i8*
%res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
i8* %a_i8, <8 x i32> %idx, <8 x float> %mask, i8 4) ;

;; for debugging, we'll just dump out the mask
%out_ptr = bitcast float * %out to <8 x float> *
store <8 x float> %mask, <8 x float> * %out_ptr, align 4

ret <8 x float> %res
}