; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
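
; Each test below loads a vector, shuffles its low elements into a narrower
; vector, and sign/zero-extends the result. The checks verify that this
; pattern is selected to the memory form of (v)pmovsx*/(v)pmovzx*, i.e. that
; the unaligned load is folded into the extension; -disable-peephole keeps
; the peephole pass from performing the fold after instruction selection.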

define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbw:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxbw (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbw:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxbw (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %3 = sext <8 x i8> %2 to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbd:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxbd (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxbd (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = sext <4 x i8> %2 to <4 x i32>
  ret <4 x i32> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxbq (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxbq (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
  %3 = sext <2 x i8> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxwd:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxwd (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxwd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxwd (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = sext <4 x i16> %2 to <4 x i32>
  ret <4 x i32> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxwq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxwq (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxwq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxwq (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
  %3 = sext <2 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxdq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovsxdq (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxdq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovsxdq (%rdi), %xmm0
; AVX-NEXT:    retq
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %3 = sext <2 x i32> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbw:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbw:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %3 = zext <8 x i8> %2 to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbd:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = zext <4 x i8> %2 to <4 x i32>
  ret <4 x i32> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
  %3 = zext <2 x i8> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxwd:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxwd:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = zext <4 x i16> %2 to <4 x i32>
  ret <4 x i32> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxwq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxwq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
  %3 = zext <2 x i16> %2 to <2 x i64>
  ret <2 x i64> %3
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxdq(<4 x i32>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxdq:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxdq:
; AVX:       ## %bb.0:
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX-NEXT:    retq
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %3 = zext <2 x i32> %2 to <2 x i64>
  ret <2 x i64> %3
}