; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X32,X32-SLOW
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=X32,X32-FAST-ALL
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=X32,X32-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=X64,X64-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=X64,X64-FAST-PERLANE
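
; trunc4 - v4i64 -> v4i32 truncation. With slow variable shuffles (and with
; only fast per-lane shuffles) this is lowered as vextractf128 + vshufps; with
; fast cross-lane shuffles a single vpermps is used instead.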
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-SLOW-LABEL: trunc4:
; X32-SLOW:       # %bb.0:
; X32-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SLOW-NEXT:    vzeroupper
; X32-SLOW-NEXT:    retl
;
; X32-FAST-ALL-LABEL: trunc4:
; X32-FAST-ALL:       # %bb.0:
; X32-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm1 = <0,2,4,6,u,u,u,u>
; X32-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
; X32-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-FAST-ALL-NEXT:    vzeroupper
; X32-FAST-ALL-NEXT:    retl
;
; X32-FAST-PERLANE-LABEL: trunc4:
; X32-FAST-PERLANE:       # %bb.0:
; X32-FAST-PERLANE-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-FAST-PERLANE-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-FAST-PERLANE-NEXT:    vzeroupper
; X32-FAST-PERLANE-NEXT:    retl
;
; X64-SLOW-LABEL: trunc4:
; X64-SLOW:       # %bb.0:
; X64-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SLOW-NEXT:    vzeroupper
; X64-SLOW-NEXT:    retq
;
; X64-FAST-ALL-LABEL: trunc4:
; X64-FAST-ALL:       # %bb.0:
; X64-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm1 = <0,2,4,6,u,u,u,u>
; X64-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
; X64-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-FAST-ALL-NEXT:    vzeroupper
; X64-FAST-ALL-NEXT:    retq
;
; X64-FAST-PERLANE-LABEL: trunc4:
; X64-FAST-PERLANE:       # %bb.0:
; X64-FAST-PERLANE-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-FAST-PERLANE-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-FAST-PERLANE-NEXT:    vzeroupper
; X64-FAST-PERLANE-NEXT:    retq
  %B = trunc <4 x i64> %A to <4 x i32>
  ret <4 x i32> %B
}
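
; trunc8 - v8i32 -> v8i16 truncation: an in-lane vpshufb gathers the low words,
; then vpermq concatenates the two lane halves.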
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
; X32:       # %bb.0:
; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: trunc8:
; X64:       # %bb.0:
; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %B = trunc <8 x i32> %A to <8 x i16>
  ret <8 x i16> %B
}
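
; sext4 - v4i32 -> v4i64 sign extension maps directly to vpmovsxdq.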
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
; X32:       # %bb.0:
; X32-NEXT:    vpmovsxdq %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: sext4:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxdq %xmm0, %ymm0
; X64-NEXT:    retq
  %B = sext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
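
; sext8 - v8i16 -> v8i32 sign extension maps directly to vpmovsxwd.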
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
; X32:       # %bb.0:
; X32-NEXT:    vpmovsxwd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: sext8:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxwd %xmm0, %ymm0
; X64-NEXT:    retq
  %B = sext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
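
; zext4 - v4i32 -> v4i64 zero extension maps directly to vpmovzxdq.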
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
; X32:       # %bb.0:
; X32-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT:    retl
;
; X64-LABEL: zext4:
; X64:       # %bb.0:
; X64-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT:    retq
  %B = zext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
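
; zext8 - v8i16 -> v8i32 zero extension maps directly to vpmovzxwd.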
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
; X32:       # %bb.0:
; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT:    retl
;
; X64-LABEL: zext8:
; X64:       # %bb.0:
; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT:    retq
  %B = zext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
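
; zext_8i8_8i32 - vpmovzxbd zero-extends all eight bytes in a single instruction.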
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
; X32:       # %bb.0:
; X32-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: zext_8i8_8i32:
; X64:       # %bb.0:
; X64-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; X64-NEXT:    retq
  %B = zext <8 x i8> %A to <8 x i32>
  ret <8 x i32> %B
}
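
; zext_16i8_16i16 - vpmovzxbw widens all sixteen bytes to words at once.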
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
; X32:       # %bb.0:
; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT:    retl
;
; X64-LABEL: zext_16i8_16i16:
; X64:       # %bb.0:
; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT:    retq
  %t = zext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
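
; sext_16i8_16i16 - the signed counterpart, using vpmovsxbw.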
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
; X32:       # %bb.0:
; X32-NEXT:    vpmovsxbw %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: sext_16i8_16i16:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxbw %xmm0, %ymm0
; X64-NEXT:    retq
  %t = sext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
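
; trunc_16i16_16i8 - truncate by masking off the high byte of each word, then
; packing the two halves back together with vpackuswb.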
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
; X32:       # %bb.0:
; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X32-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: trunc_16i16_16i8:
; X64:       # %bb.0:
; X64-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %t = trunc <16 x i16> %z to <16 x i8>
  ret <16 x i8> %t
}
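
; load_sext_test1-5 - sign-extending loads fold the memory operand into
; vpmovsx*; on X32 the pointer argument is first reloaded from the stack.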
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpmovsxdq (%eax), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: load_sext_test1:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxdq (%rdi), %ymm0
; X64-NEXT:    retq
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = sext <4 x i32> %X to <4 x i64>
  ret <4 x i64> %Y
}
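
; load_sext_test2 - v4i8 load sign-extended to v4i64 via vpmovsxbq.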
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpmovsxbq (%eax), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: load_sext_test2:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxbq (%rdi), %ymm0
; X64-NEXT:    retq
  %X = load <4 x i8>, <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i64>
  ret <4 x i64> %Y
}
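
; load_sext_test3 - v4i16 load sign-extended to v4i64 via vpmovsxwq.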
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpmovsxwq (%eax), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: load_sext_test3:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxwq (%rdi), %ymm0
; X64-NEXT:    retq
  %X = load <4 x i16>, <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i64>
  ret <4 x i64> %Y
}
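
; load_sext_test4 - v8i16 load sign-extended to v8i32 via vpmovsxwd.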
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpmovsxwd (%eax), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: load_sext_test4:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxwd (%rdi), %ymm0
; X64-NEXT:    retq
  %X = load <8 x i16>, <8 x i16>* %ptr
  %Y = sext <8 x i16> %X to <8 x i32>
  ret <8 x i32> %Y
}
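
; load_sext_test5 - v8i8 load sign-extended to v8i32 via vpmovsxbd.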
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpmovsxbd (%eax), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: load_sext_test5:
; X64:       # %bb.0:
; X64-NEXT:    vpmovsxbd (%rdi), %ymm0
; X64-NEXT:    retq
  %X = load <8 x i8>, <8 x i8>* %ptr
  %Y = sext <8 x i8> %X to <8 x i32>
  ret <8 x i32> %Y
}