1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
3 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
4 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX2
5 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
6 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
7 ; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
9 @c = external global i32*, align 8
11 ; %val1 = load <2 x i8>
12 ; %op1 = zext<2 x i32> %val1
13 ; %val2 = load <2 x i8>
14 ; %op2 = zext<2 x i32> %val2
15 ; %rst = mul <2 x i32> %op1, %op2
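; Both inputs are zero-extended i8, so the 16-bit product already holds the full result:
; SSE2 unpacks the bytes against zero, multiplies with pmullw and widens the products with
; punpcklwd, while AVX can use vpmovzxbd + vpmaddwd (the high half of each lane is zero).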
17 define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
18 ; X86-SSE-LABEL: mul_2xi8:
19 ; X86-SSE: # %bb.0: # %entry
20 ; X86-SSE-NEXT: pushl %esi
21 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
22 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
23 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
24 ; X86-SSE-NEXT: movl c, %esi
25 ; X86-SSE-NEXT: movzwl (%edx,%ecx), %edx
26 ; X86-SSE-NEXT: movd %edx, %xmm0
27 ; X86-SSE-NEXT: movzwl (%eax,%ecx), %eax
28 ; X86-SSE-NEXT: movd %eax, %xmm1
29 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
30 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
31 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
32 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
33 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
34 ; X86-SSE-NEXT: movq %xmm1, (%esi,%ecx,4)
35 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
38 ; X86-AVX-LABEL: mul_2xi8:
39 ; X86-AVX: # %bb.0: # %entry
40 ; X86-AVX-NEXT: pushl %esi
41 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
42 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
43 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
44 ; X86-AVX-NEXT: movl c, %esi
45 ; X86-AVX-NEXT: movzwl (%edx,%ecx), %edx
46 ; X86-AVX-NEXT: vmovd %edx, %xmm0
47 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
48 ; X86-AVX-NEXT: movzwl (%eax,%ecx), %eax
49 ; X86-AVX-NEXT: vmovd %eax, %xmm1
50 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
51 ; X86-AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
52 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
53 ; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
56 ; X64-SSE-LABEL: mul_2xi8:
57 ; X64-SSE: # %bb.0: # %entry
58 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
59 ; X64-SSE-NEXT: movzwl (%rdi,%rdx), %ecx
60 ; X64-SSE-NEXT: movd %ecx, %xmm0
61 ; X64-SSE-NEXT: movzwl (%rsi,%rdx), %ecx
62 ; X64-SSE-NEXT: movd %ecx, %xmm1
63 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
64 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
65 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
66 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
67 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
68 ; X64-SSE-NEXT: movq %xmm1, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
71 ; X64-AVX-LABEL: mul_2xi8:
72 ; X64-AVX: # %bb.0: # %entry
73 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
74 ; X64-AVX-NEXT: movzwl (%rdi,%rdx), %ecx
75 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
76 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
77 ; X64-AVX-NEXT: movzwl (%rsi,%rdx), %ecx
78 ; X64-AVX-NEXT: vmovd %ecx, %xmm1
79 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
80 ; X64-AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
81 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
; X64-AVX-NEXT: retq
entry:
84 %pre = load i32*, i32** @c
85 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
86 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
87 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
88 %tmp8 = zext <2 x i8> %wide.load to <2 x i32>
89 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
90 %tmp11 = bitcast i8* %tmp10 to <2 x i8>*
91 %wide.load17 = load <2 x i8>, <2 x i8>* %tmp11, align 1
92 %tmp12 = zext <2 x i8> %wide.load17 to <2 x i32>
93 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
94 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
95 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
96 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
  ret void
}
100 ; %val1 = load <4 x i8>
101 ; %op1 = zext<4 x i32> %val1
102 ; %val2 = load <4 x i8>
103 ; %op2 = zext<4 x i32> %val2
104 ; %rst = mul <4 x i32> %op1, %op2
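; Same shrunken multiply as mul_2xi8, but on a full <4 x i8>; the <4 x i32> result is
; written back with an unaligned 128-bit store.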
106 define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
107 ; X86-SSE-LABEL: mul_4xi8:
108 ; X86-SSE: # %bb.0: # %entry
109 ; X86-SSE-NEXT: pushl %esi
110 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
111 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
112 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
113 ; X86-SSE-NEXT: movl c, %esi
114 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
115 ; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
116 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
117 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
118 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
119 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
120 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
121 ; X86-SSE-NEXT: movdqu %xmm1, (%esi,%ecx,4)
122 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
125 ; X86-AVX-LABEL: mul_4xi8:
126 ; X86-AVX: # %bb.0: # %entry
127 ; X86-AVX-NEXT: pushl %esi
128 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
129 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
130 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
131 ; X86-AVX-NEXT: movl c, %esi
132 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
133 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
134 ; X86-AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
135 ; X86-AVX-NEXT: vmovdqu %xmm0, (%esi,%ecx,4)
136 ; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
139 ; X64-SSE-LABEL: mul_4xi8:
140 ; X64-SSE: # %bb.0: # %entry
141 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
142 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
143 ; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
144 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
145 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
146 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
147 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
148 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
149 ; X64-SSE-NEXT: movdqu %xmm1, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
152 ; X64-AVX-LABEL: mul_4xi8:
153 ; X64-AVX: # %bb.0: # %entry
154 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
155 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
156 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
157 ; X64-AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
158 ; X64-AVX-NEXT: vmovdqu %xmm0, (%rax,%rdx,4)
; X64-AVX-NEXT: retq
entry:
161 %pre = load i32*, i32** @c
162 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
163 %tmp7 = bitcast i8* %tmp6 to <4 x i8>*
164 %wide.load = load <4 x i8>, <4 x i8>* %tmp7, align 1
165 %tmp8 = zext <4 x i8> %wide.load to <4 x i32>
166 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
167 %tmp11 = bitcast i8* %tmp10 to <4 x i8>*
168 %wide.load17 = load <4 x i8>, <4 x i8>* %tmp11, align 1
169 %tmp12 = zext <4 x i8> %wide.load17 to <4 x i32>
170 %tmp13 = mul nuw nsw <4 x i32> %tmp12, %tmp8
171 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
172 %tmp15 = bitcast i32* %tmp14 to <4 x i32>*
173 store <4 x i32> %tmp13, <4 x i32>* %tmp15, align 4
  ret void
}
177 ; %val1 = load <8 x i8>
178 ; %op1 = zext<8 x i32> %val1
179 ; %val2 = load <8 x i8>
180 ; %op2 = zext<8 x i32> %val2
181 ; %rst = mul <8 x i32> %op1, %op2
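; With 8 elements the <8 x i32> result needs 32 bytes: SSE2 splits the pmullw result with
; punpcklwd/punpckhwd into two stores, AVX1 uses two vpmaddwd plus vinsertf128, and AVX2 a
; single 256-bit vpmaddwd.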
183 define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
184 ; X86-SSE-LABEL: mul_8xi8:
185 ; X86-SSE: # %bb.0: # %entry
186 ; X86-SSE-NEXT: pushl %esi
187 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
188 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
189 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
190 ; X86-SSE-NEXT: movl c, %esi
191 ; X86-SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
192 ; X86-SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
193 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
194 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
195 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
196 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
197 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
198 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
199 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
200 ; X86-SSE-NEXT: movdqu %xmm1, 16(%esi,%ecx,4)
201 ; X86-SSE-NEXT: movdqu %xmm0, (%esi,%ecx,4)
202 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
205 ; X86-AVX1-LABEL: mul_8xi8:
206 ; X86-AVX1: # %bb.0: # %entry
207 ; X86-AVX1-NEXT: pushl %esi
208 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
209 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
210 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
211 ; X86-AVX1-NEXT: movl c, %esi
212 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
213 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
214 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
215 ; X86-AVX1-NEXT: vpmaddwd %xmm0, %xmm2, %xmm0
216 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
217 ; X86-AVX1-NEXT: vpmaddwd %xmm1, %xmm2, %xmm1
218 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
219 ; X86-AVX1-NEXT: vmovups %ymm0, (%esi,%ecx,4)
220 ; X86-AVX1-NEXT: popl %esi
221 ; X86-AVX1-NEXT: vzeroupper
222 ; X86-AVX1-NEXT: retl
224 ; X86-AVX2-LABEL: mul_8xi8:
225 ; X86-AVX2: # %bb.0: # %entry
226 ; X86-AVX2-NEXT: pushl %esi
227 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
228 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
229 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %edx
230 ; X86-AVX2-NEXT: movl c, %esi
231 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
232 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
233 ; X86-AVX2-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
234 ; X86-AVX2-NEXT: vmovdqu %ymm0, (%esi,%ecx,4)
235 ; X86-AVX2-NEXT: popl %esi
236 ; X86-AVX2-NEXT: vzeroupper
237 ; X86-AVX2-NEXT: retl
239 ; X64-SSE-LABEL: mul_8xi8:
240 ; X64-SSE: # %bb.0: # %entry
241 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
242 ; X64-SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
243 ; X64-SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
244 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
245 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
246 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
247 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
248 ; X64-SSE-NEXT: movdqa %xmm1, %xmm0
249 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
250 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
251 ; X64-SSE-NEXT: movdqu %xmm1, 16(%rax,%rdx,4)
252 ; X64-SSE-NEXT: movdqu %xmm0, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
255 ; X64-AVX1-LABEL: mul_8xi8:
256 ; X64-AVX1: # %bb.0: # %entry
257 ; X64-AVX1-NEXT: movq {{.*}}(%rip), %rax
258 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
259 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
260 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
261 ; X64-AVX1-NEXT: vpmaddwd %xmm0, %xmm2, %xmm0
262 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
263 ; X64-AVX1-NEXT: vpmaddwd %xmm1, %xmm2, %xmm1
264 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
265 ; X64-AVX1-NEXT: vmovups %ymm0, (%rax,%rdx,4)
266 ; X64-AVX1-NEXT: vzeroupper
267 ; X64-AVX1-NEXT: retq
269 ; X64-AVX2-LABEL: mul_8xi8:
270 ; X64-AVX2: # %bb.0: # %entry
271 ; X64-AVX2-NEXT: movq {{.*}}(%rip), %rax
272 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
273 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
274 ; X64-AVX2-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
275 ; X64-AVX2-NEXT: vmovdqu %ymm0, (%rax,%rdx,4)
276 ; X64-AVX2-NEXT: vzeroupper
277 ; X64-AVX2-NEXT: retq
entry:
279 %pre = load i32*, i32** @c
280 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
281 %tmp7 = bitcast i8* %tmp6 to <8 x i8>*
282 %wide.load = load <8 x i8>, <8 x i8>* %tmp7, align 1
283 %tmp8 = zext <8 x i8> %wide.load to <8 x i32>
284 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
285 %tmp11 = bitcast i8* %tmp10 to <8 x i8>*
286 %wide.load17 = load <8 x i8>, <8 x i8>* %tmp11, align 1
287 %tmp12 = zext <8 x i8> %wide.load17 to <8 x i32>
288 %tmp13 = mul nuw nsw <8 x i32> %tmp12, %tmp8
289 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
290 %tmp15 = bitcast i32* %tmp14 to <8 x i32>*
291 store <8 x i32> %tmp13, <8 x i32>* %tmp15, align 4
  ret void
}
295 ; %val1 = load <16 x i8>
296 ; %op1 = zext<16 x i32> %val1
297 ; %val2 = load <16 x i8>
298 ; %op2 = zext<16 x i32> %val2
299 ; %rst = mul <16 x i32> %op1, %op2
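; 16 elements produce 64 bytes of i32 results: SSE2 processes both punpcklbw/punpckhbw
; halves and issues four stores, AVX1 four 128-bit vpmaddwd, AVX2 two 256-bit vpmaddwd.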
301 define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
302 ; X86-SSE-LABEL: mul_16xi8:
303 ; X86-SSE: # %bb.0: # %entry
304 ; X86-SSE-NEXT: pushl %esi
305 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
306 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
307 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
308 ; X86-SSE-NEXT: movl c, %esi
309 ; X86-SSE-NEXT: movdqu (%edx,%ecx), %xmm0
310 ; X86-SSE-NEXT: movdqu (%eax,%ecx), %xmm1
311 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
312 ; X86-SSE-NEXT: movdqa %xmm0, %xmm3
313 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
314 ; X86-SSE-NEXT: movdqa %xmm1, %xmm4
315 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
316 ; X86-SSE-NEXT: pmullw %xmm3, %xmm4
317 ; X86-SSE-NEXT: movdqa %xmm4, %xmm3
318 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
319 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
320 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
321 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
322 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
323 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
324 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
325 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
326 ; X86-SSE-NEXT: movdqu %xmm1, 48(%esi,%ecx,4)
327 ; X86-SSE-NEXT: movdqu %xmm0, 32(%esi,%ecx,4)
328 ; X86-SSE-NEXT: movdqu %xmm4, 16(%esi,%ecx,4)
329 ; X86-SSE-NEXT: movdqu %xmm3, (%esi,%ecx,4)
330 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
333 ; X86-AVX1-LABEL: mul_16xi8:
334 ; X86-AVX1: # %bb.0: # %entry
335 ; X86-AVX1-NEXT: pushl %esi
336 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
337 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
338 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
339 ; X86-AVX1-NEXT: movl c, %esi
340 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
341 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
342 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
343 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
344 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
345 ; X86-AVX1-NEXT: vpmaddwd %xmm0, %xmm4, %xmm0
346 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
347 ; X86-AVX1-NEXT: vpmaddwd %xmm1, %xmm4, %xmm1
348 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
349 ; X86-AVX1-NEXT: vpmaddwd %xmm2, %xmm4, %xmm2
350 ; X86-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
351 ; X86-AVX1-NEXT: vpmaddwd %xmm3, %xmm4, %xmm3
352 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
353 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
354 ; X86-AVX1-NEXT: vmovups %ymm0, 32(%esi,%ecx,4)
355 ; X86-AVX1-NEXT: vmovups %ymm2, (%esi,%ecx,4)
356 ; X86-AVX1-NEXT: popl %esi
357 ; X86-AVX1-NEXT: vzeroupper
358 ; X86-AVX1-NEXT: retl
360 ; X86-AVX2-LABEL: mul_16xi8:
361 ; X86-AVX2: # %bb.0: # %entry
362 ; X86-AVX2-NEXT: pushl %esi
363 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
364 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
365 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %edx
366 ; X86-AVX2-NEXT: movl c, %esi
367 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
368 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
369 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
370 ; X86-AVX2-NEXT: vpmaddwd %ymm0, %ymm2, %ymm0
371 ; X86-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
372 ; X86-AVX2-NEXT: vpmaddwd %ymm1, %ymm2, %ymm1
373 ; X86-AVX2-NEXT: vmovdqu %ymm0, 32(%esi,%ecx,4)
374 ; X86-AVX2-NEXT: vmovdqu %ymm1, (%esi,%ecx,4)
375 ; X86-AVX2-NEXT: popl %esi
376 ; X86-AVX2-NEXT: vzeroupper
377 ; X86-AVX2-NEXT: retl
379 ; X64-SSE-LABEL: mul_16xi8:
380 ; X64-SSE: # %bb.0: # %entry
381 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
382 ; X64-SSE-NEXT: movdqu (%rdi,%rdx), %xmm0
383 ; X64-SSE-NEXT: movdqu (%rsi,%rdx), %xmm1
384 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
385 ; X64-SSE-NEXT: movdqa %xmm0, %xmm3
386 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
387 ; X64-SSE-NEXT: movdqa %xmm1, %xmm4
388 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
389 ; X64-SSE-NEXT: pmullw %xmm3, %xmm4
390 ; X64-SSE-NEXT: movdqa %xmm4, %xmm3
391 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
392 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
393 ; X64-SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
394 ; X64-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
395 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
396 ; X64-SSE-NEXT: movdqa %xmm1, %xmm0
397 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
398 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
399 ; X64-SSE-NEXT: movdqu %xmm1, 48(%rax,%rdx,4)
400 ; X64-SSE-NEXT: movdqu %xmm0, 32(%rax,%rdx,4)
401 ; X64-SSE-NEXT: movdqu %xmm4, 16(%rax,%rdx,4)
402 ; X64-SSE-NEXT: movdqu %xmm3, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
405 ; X64-AVX1-LABEL: mul_16xi8:
406 ; X64-AVX1: # %bb.0: # %entry
407 ; X64-AVX1-NEXT: movq {{.*}}(%rip), %rax
408 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
409 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
410 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
411 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
412 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
413 ; X64-AVX1-NEXT: vpmaddwd %xmm0, %xmm4, %xmm0
414 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
415 ; X64-AVX1-NEXT: vpmaddwd %xmm1, %xmm4, %xmm1
416 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
417 ; X64-AVX1-NEXT: vpmaddwd %xmm2, %xmm4, %xmm2
418 ; X64-AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
419 ; X64-AVX1-NEXT: vpmaddwd %xmm3, %xmm4, %xmm3
420 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
421 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
422 ; X64-AVX1-NEXT: vmovups %ymm0, 32(%rax,%rdx,4)
423 ; X64-AVX1-NEXT: vmovups %ymm2, (%rax,%rdx,4)
424 ; X64-AVX1-NEXT: vzeroupper
425 ; X64-AVX1-NEXT: retq
427 ; X64-AVX2-LABEL: mul_16xi8:
428 ; X64-AVX2: # %bb.0: # %entry
429 ; X64-AVX2-NEXT: movq {{.*}}(%rip), %rax
430 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
431 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
432 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
433 ; X64-AVX2-NEXT: vpmaddwd %ymm0, %ymm2, %ymm0
434 ; X64-AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
435 ; X64-AVX2-NEXT: vpmaddwd %ymm1, %ymm2, %ymm1
436 ; X64-AVX2-NEXT: vmovdqu %ymm0, 32(%rax,%rdx,4)
437 ; X64-AVX2-NEXT: vmovdqu %ymm1, (%rax,%rdx,4)
438 ; X64-AVX2-NEXT: vzeroupper
439 ; X64-AVX2-NEXT: retq
entry:
441 %pre = load i32*, i32** @c
442 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
443 %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
444 %wide.load = load <16 x i8>, <16 x i8>* %tmp7, align 1
445 %tmp8 = zext <16 x i8> %wide.load to <16 x i32>
446 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
447 %tmp11 = bitcast i8* %tmp10 to <16 x i8>*
448 %wide.load17 = load <16 x i8>, <16 x i8>* %tmp11, align 1
449 %tmp12 = zext <16 x i8> %wide.load17 to <16 x i32>
450 %tmp13 = mul nuw nsw <16 x i32> %tmp12, %tmp8
451 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
452 %tmp15 = bitcast i32* %tmp14 to <16 x i32>*
453 store <16 x i32> %tmp13, <16 x i32>* %tmp15, align 4
  ret void
}
457 ; %val1 = load <2 x i16>
458 ; %op1 = zext<2 x i32> %val1
459 ; %val2 = load <2 x i16>
460 ; %op2 = zext<2 x i32> %val2
461 ; %rst = mul <2 x i32> %op1, %op2
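; Zero-extended i16 inputs: SSE2 rebuilds each 32-bit product from pmullw (low half) and
; pmulhuw (unsigned high half) interleaved with punpcklwd; AVX uses vpmovzxwd + vpmulld.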
463 define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
464 ; X86-SSE-LABEL: mul_2xi16:
465 ; X86-SSE: # %bb.0: # %entry
466 ; X86-SSE-NEXT: pushl %esi
467 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
468 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
469 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
470 ; X86-SSE-NEXT: movl c, %esi
471 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
472 ; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
473 ; X86-SSE-NEXT: movdqa %xmm1, %xmm2
474 ; X86-SSE-NEXT: pmulhuw %xmm0, %xmm2
475 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
476 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
477 ; X86-SSE-NEXT: movq %xmm1, (%esi,%ecx,4)
478 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
481 ; X86-AVX-LABEL: mul_2xi16:
482 ; X86-AVX: # %bb.0: # %entry
483 ; X86-AVX-NEXT: pushl %esi
484 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
485 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
486 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
487 ; X86-AVX-NEXT: movl c, %esi
488 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
489 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
490 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
491 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
492 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
493 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
494 ; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
497 ; X64-SSE-LABEL: mul_2xi16:
498 ; X64-SSE: # %bb.0: # %entry
499 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
500 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
501 ; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
502 ; X64-SSE-NEXT: movdqa %xmm1, %xmm2
503 ; X64-SSE-NEXT: pmulhuw %xmm0, %xmm2
504 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
505 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
506 ; X64-SSE-NEXT: movq %xmm1, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
509 ; X64-AVX-LABEL: mul_2xi16:
510 ; X64-AVX: # %bb.0: # %entry
511 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
512 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
513 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
514 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
515 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
516 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
517 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
; X64-AVX-NEXT: retq
entry:
520 %pre = load i32*, i32** @c
521 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
522 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
523 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
524 %tmp8 = zext <2 x i16> %wide.load to <2 x i32>
525 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
526 %tmp11 = bitcast i8* %tmp10 to <2 x i16>*
527 %wide.load17 = load <2 x i16>, <2 x i16>* %tmp11, align 1
528 %tmp12 = zext <2 x i16> %wide.load17 to <2 x i32>
529 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
530 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
531 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
532 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
  ret void
}
536 ; %val1 = load <4 x i16>
537 ; %op1 = zext<4 x i32> %val1
538 ; %val2 = load <4 x i16>
539 ; %op2 = zext<4 x i32> %val2
540 ; %rst = mul <4 x i32> %op1, %op2
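; Same pmullw/pmulhuw (SSE2) and vpmovzxwd/vpmulld (AVX) lowering as mul_2xi16, now
; producing a full 128-bit store.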
542 define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
543 ; X86-SSE-LABEL: mul_4xi16:
544 ; X86-SSE: # %bb.0: # %entry
545 ; X86-SSE-NEXT: pushl %esi
546 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
547 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
548 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
549 ; X86-SSE-NEXT: movl c, %esi
550 ; X86-SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
551 ; X86-SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
552 ; X86-SSE-NEXT: movdqa %xmm1, %xmm2
553 ; X86-SSE-NEXT: pmulhuw %xmm0, %xmm2
554 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
555 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
556 ; X86-SSE-NEXT: movdqu %xmm1, (%esi,%ecx,4)
557 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
560 ; X86-AVX-LABEL: mul_4xi16:
561 ; X86-AVX: # %bb.0: # %entry
562 ; X86-AVX-NEXT: pushl %esi
563 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
564 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
565 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
566 ; X86-AVX-NEXT: movl c, %esi
567 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
568 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
569 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
570 ; X86-AVX-NEXT: vmovdqu %xmm0, (%esi,%ecx,4)
571 ; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
574 ; X64-SSE-LABEL: mul_4xi16:
575 ; X64-SSE: # %bb.0: # %entry
576 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
577 ; X64-SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
578 ; X64-SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
579 ; X64-SSE-NEXT: movdqa %xmm1, %xmm2
580 ; X64-SSE-NEXT: pmulhuw %xmm0, %xmm2
581 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
582 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
583 ; X64-SSE-NEXT: movdqu %xmm1, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
586 ; X64-AVX-LABEL: mul_4xi16:
587 ; X64-AVX: # %bb.0: # %entry
588 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
589 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
590 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
591 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
592 ; X64-AVX-NEXT: vmovdqu %xmm0, (%rax,%rdx,4)
; X64-AVX-NEXT: retq
entry:
595 %pre = load i32*, i32** @c
596 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
597 %tmp7 = bitcast i8* %tmp6 to <4 x i16>*
598 %wide.load = load <4 x i16>, <4 x i16>* %tmp7, align 1
599 %tmp8 = zext <4 x i16> %wide.load to <4 x i32>
600 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
601 %tmp11 = bitcast i8* %tmp10 to <4 x i16>*
602 %wide.load17 = load <4 x i16>, <4 x i16>* %tmp11, align 1
603 %tmp12 = zext <4 x i16> %wide.load17 to <4 x i32>
604 %tmp13 = mul nuw nsw <4 x i32> %tmp12, %tmp8
605 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
606 %tmp15 = bitcast i32* %tmp14 to <4 x i32>*
607 store <4 x i32> %tmp13, <4 x i32>* %tmp15, align 4
  ret void
}
611 ; %val1 = load <8 x i16>
612 ; %op1 = zext<8 x i32> %val1
613 ; %val2 = load <8 x i16>
614 ; %op2 = zext<8 x i32> %val2
615 ; %rst = mul <8 x i32> %op1, %op2
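; Eight i16 elements: SSE2 interleaves pmullw/pmulhuw into two 128-bit stores, AVX1 uses
; two vpmulld plus vinsertf128, and AVX2 a single 256-bit vpmulld.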
617 define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
618 ; X86-SSE-LABEL: mul_8xi16:
619 ; X86-SSE: # %bb.0: # %entry
620 ; X86-SSE-NEXT: pushl %esi
621 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
622 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
623 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
624 ; X86-SSE-NEXT: movl c, %esi
625 ; X86-SSE-NEXT: movdqu (%edx,%ecx), %xmm0
626 ; X86-SSE-NEXT: movdqu (%eax,%ecx), %xmm1
627 ; X86-SSE-NEXT: movdqa %xmm1, %xmm2
628 ; X86-SSE-NEXT: pmulhuw %xmm0, %xmm2
629 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
630 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
631 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
632 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
633 ; X86-SSE-NEXT: movdqu %xmm1, 16(%esi,%ecx,4)
634 ; X86-SSE-NEXT: movdqu %xmm0, (%esi,%ecx,4)
635 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
638 ; X86-AVX1-LABEL: mul_8xi16:
639 ; X86-AVX1: # %bb.0: # %entry
640 ; X86-AVX1-NEXT: pushl %esi
641 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
642 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
643 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
644 ; X86-AVX1-NEXT: movl c, %esi
645 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
646 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
647 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
648 ; X86-AVX1-NEXT: vpmulld %xmm0, %xmm2, %xmm0
649 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
650 ; X86-AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
651 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
652 ; X86-AVX1-NEXT: vmovups %ymm0, (%esi,%ecx,4)
653 ; X86-AVX1-NEXT: popl %esi
654 ; X86-AVX1-NEXT: vzeroupper
655 ; X86-AVX1-NEXT: retl
657 ; X86-AVX2-LABEL: mul_8xi16:
658 ; X86-AVX2: # %bb.0: # %entry
659 ; X86-AVX2-NEXT: pushl %esi
660 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
661 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
662 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %edx
663 ; X86-AVX2-NEXT: movl c, %esi
664 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
665 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
666 ; X86-AVX2-NEXT: vpmulld %ymm0, %ymm1, %ymm0
667 ; X86-AVX2-NEXT: vmovdqu %ymm0, (%esi,%ecx,4)
668 ; X86-AVX2-NEXT: popl %esi
669 ; X86-AVX2-NEXT: vzeroupper
670 ; X86-AVX2-NEXT: retl
672 ; X64-SSE-LABEL: mul_8xi16:
673 ; X64-SSE: # %bb.0: # %entry
674 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
675 ; X64-SSE-NEXT: movdqu (%rdi,%rdx), %xmm0
676 ; X64-SSE-NEXT: movdqu (%rsi,%rdx), %xmm1
677 ; X64-SSE-NEXT: movdqa %xmm1, %xmm2
678 ; X64-SSE-NEXT: pmulhuw %xmm0, %xmm2
679 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
680 ; X64-SSE-NEXT: movdqa %xmm1, %xmm0
681 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
682 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
683 ; X64-SSE-NEXT: movdqu %xmm1, 16(%rax,%rdx,4)
684 ; X64-SSE-NEXT: movdqu %xmm0, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
687 ; X64-AVX1-LABEL: mul_8xi16:
688 ; X64-AVX1: # %bb.0: # %entry
689 ; X64-AVX1-NEXT: movq {{.*}}(%rip), %rax
690 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
691 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
692 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
693 ; X64-AVX1-NEXT: vpmulld %xmm0, %xmm2, %xmm0
694 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
695 ; X64-AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
696 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
697 ; X64-AVX1-NEXT: vmovups %ymm0, (%rax,%rdx,4)
698 ; X64-AVX1-NEXT: vzeroupper
699 ; X64-AVX1-NEXT: retq
701 ; X64-AVX2-LABEL: mul_8xi16:
702 ; X64-AVX2: # %bb.0: # %entry
703 ; X64-AVX2-NEXT: movq {{.*}}(%rip), %rax
704 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
705 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
706 ; X64-AVX2-NEXT: vpmulld %ymm0, %ymm1, %ymm0
707 ; X64-AVX2-NEXT: vmovdqu %ymm0, (%rax,%rdx,4)
708 ; X64-AVX2-NEXT: vzeroupper
709 ; X64-AVX2-NEXT: retq
entry:
711 %pre = load i32*, i32** @c
712 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
713 %tmp7 = bitcast i8* %tmp6 to <8 x i16>*
714 %wide.load = load <8 x i16>, <8 x i16>* %tmp7, align 1
715 %tmp8 = zext <8 x i16> %wide.load to <8 x i32>
716 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
717 %tmp11 = bitcast i8* %tmp10 to <8 x i16>*
718 %wide.load17 = load <8 x i16>, <8 x i16>* %tmp11, align 1
719 %tmp12 = zext <8 x i16> %wide.load17 to <8 x i32>
720 %tmp13 = mul nuw nsw <8 x i32> %tmp12, %tmp8
721 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
722 %tmp15 = bitcast i32* %tmp14 to <8 x i32>*
723 store <8 x i32> %tmp13, <8 x i32>* %tmp15, align 4
  ret void
}
727 ; %val1 = load <16 x i16>
728 ; %op1 = zext<16 x i32> %val1
729 ; %val2 = load <16 x i16>
730 ; %op2 = zext<16 x i32> %val2
731 ; %rst = mul <16 x i32> %op1, %op2
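; Sixteen i16 elements: the mul_8xi16 lowering applied twice, giving four stores on SSE2
; and two 256-bit stores on AVX.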
733 define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
734 ; X86-SSE-LABEL: mul_16xi16:
735 ; X86-SSE: # %bb.0: # %entry
736 ; X86-SSE-NEXT: pushl %esi
737 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
738 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
739 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
740 ; X86-SSE-NEXT: movl c, %esi
741 ; X86-SSE-NEXT: movdqu (%edx,%ecx), %xmm0
742 ; X86-SSE-NEXT: movdqu 16(%edx,%ecx), %xmm1
743 ; X86-SSE-NEXT: movdqu (%eax,%ecx), %xmm2
744 ; X86-SSE-NEXT: movdqu 16(%eax,%ecx), %xmm3
745 ; X86-SSE-NEXT: movdqa %xmm2, %xmm4
746 ; X86-SSE-NEXT: pmulhuw %xmm0, %xmm4
747 ; X86-SSE-NEXT: pmullw %xmm0, %xmm2
748 ; X86-SSE-NEXT: movdqa %xmm2, %xmm0
749 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
750 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
751 ; X86-SSE-NEXT: movdqa %xmm3, %xmm4
752 ; X86-SSE-NEXT: pmulhuw %xmm1, %xmm4
753 ; X86-SSE-NEXT: pmullw %xmm1, %xmm3
754 ; X86-SSE-NEXT: movdqa %xmm3, %xmm1
755 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
756 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
757 ; X86-SSE-NEXT: movdqu %xmm3, 48(%esi,%ecx,4)
758 ; X86-SSE-NEXT: movdqu %xmm1, 32(%esi,%ecx,4)
759 ; X86-SSE-NEXT: movdqu %xmm2, 16(%esi,%ecx,4)
760 ; X86-SSE-NEXT: movdqu %xmm0, (%esi,%ecx,4)
761 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
764 ; X86-AVX1-LABEL: mul_16xi16:
765 ; X86-AVX1: # %bb.0: # %entry
766 ; X86-AVX1-NEXT: pushl %esi
767 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
768 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
769 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
770 ; X86-AVX1-NEXT: movl c, %esi
771 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
772 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
773 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
774 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
775 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
776 ; X86-AVX1-NEXT: vpmulld %xmm0, %xmm4, %xmm0
777 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
778 ; X86-AVX1-NEXT: vpmulld %xmm1, %xmm4, %xmm1
779 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
780 ; X86-AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
781 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
782 ; X86-AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3
783 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
784 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
785 ; X86-AVX1-NEXT: vmovups %ymm0, 32(%esi,%ecx,4)
786 ; X86-AVX1-NEXT: vmovups %ymm2, (%esi,%ecx,4)
787 ; X86-AVX1-NEXT: popl %esi
788 ; X86-AVX1-NEXT: vzeroupper
789 ; X86-AVX1-NEXT: retl
791 ; X86-AVX2-LABEL: mul_16xi16:
792 ; X86-AVX2: # %bb.0: # %entry
793 ; X86-AVX2-NEXT: pushl %esi
794 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
795 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
796 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %edx
797 ; X86-AVX2-NEXT: movl c, %esi
798 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
799 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
800 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
801 ; X86-AVX2-NEXT: vpmulld %ymm0, %ymm2, %ymm0
802 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
803 ; X86-AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
804 ; X86-AVX2-NEXT: vmovdqu %ymm0, 32(%esi,%ecx,4)
805 ; X86-AVX2-NEXT: vmovdqu %ymm1, (%esi,%ecx,4)
806 ; X86-AVX2-NEXT: popl %esi
807 ; X86-AVX2-NEXT: vzeroupper
808 ; X86-AVX2-NEXT: retl
810 ; X64-SSE-LABEL: mul_16xi16:
811 ; X64-SSE: # %bb.0: # %entry
812 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
813 ; X64-SSE-NEXT: movdqu (%rdi,%rdx), %xmm0
814 ; X64-SSE-NEXT: movdqu 16(%rdi,%rdx), %xmm1
815 ; X64-SSE-NEXT: movdqu (%rsi,%rdx), %xmm2
816 ; X64-SSE-NEXT: movdqu 16(%rsi,%rdx), %xmm3
817 ; X64-SSE-NEXT: movdqa %xmm2, %xmm4
818 ; X64-SSE-NEXT: pmulhuw %xmm0, %xmm4
819 ; X64-SSE-NEXT: pmullw %xmm0, %xmm2
820 ; X64-SSE-NEXT: movdqa %xmm2, %xmm0
821 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
822 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
823 ; X64-SSE-NEXT: movdqa %xmm3, %xmm4
824 ; X64-SSE-NEXT: pmulhuw %xmm1, %xmm4
825 ; X64-SSE-NEXT: pmullw %xmm1, %xmm3
826 ; X64-SSE-NEXT: movdqa %xmm3, %xmm1
827 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
828 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
829 ; X64-SSE-NEXT: movdqu %xmm3, 48(%rax,%rdx,4)
830 ; X64-SSE-NEXT: movdqu %xmm1, 32(%rax,%rdx,4)
831 ; X64-SSE-NEXT: movdqu %xmm2, 16(%rax,%rdx,4)
832 ; X64-SSE-NEXT: movdqu %xmm0, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
835 ; X64-AVX1-LABEL: mul_16xi16:
836 ; X64-AVX1: # %bb.0: # %entry
837 ; X64-AVX1-NEXT: movq {{.*}}(%rip), %rax
838 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
839 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
840 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
841 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
842 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
843 ; X64-AVX1-NEXT: vpmulld %xmm0, %xmm4, %xmm0
844 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
845 ; X64-AVX1-NEXT: vpmulld %xmm1, %xmm4, %xmm1
846 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
847 ; X64-AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
848 ; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
849 ; X64-AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3
850 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
851 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
852 ; X64-AVX1-NEXT: vmovups %ymm0, 32(%rax,%rdx,4)
853 ; X64-AVX1-NEXT: vmovups %ymm2, (%rax,%rdx,4)
854 ; X64-AVX1-NEXT: vzeroupper
855 ; X64-AVX1-NEXT: retq
857 ; X64-AVX2-LABEL: mul_16xi16:
858 ; X64-AVX2: # %bb.0: # %entry
859 ; X64-AVX2-NEXT: movq {{.*}}(%rip), %rax
860 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
861 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
862 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
863 ; X64-AVX2-NEXT: vpmulld %ymm0, %ymm2, %ymm0
864 ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
865 ; X64-AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
866 ; X64-AVX2-NEXT: vmovdqu %ymm0, 32(%rax,%rdx,4)
867 ; X64-AVX2-NEXT: vmovdqu %ymm1, (%rax,%rdx,4)
868 ; X64-AVX2-NEXT: vzeroupper
869 ; X64-AVX2-NEXT: retq
entry:
871 %pre = load i32*, i32** @c
872 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
873 %tmp7 = bitcast i8* %tmp6 to <16 x i16>*
874 %wide.load = load <16 x i16>, <16 x i16>* %tmp7, align 1
875 %tmp8 = zext <16 x i16> %wide.load to <16 x i32>
876 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
877 %tmp11 = bitcast i8* %tmp10 to <16 x i16>*
878 %wide.load17 = load <16 x i16>, <16 x i16>* %tmp11, align 1
879 %tmp12 = zext <16 x i16> %wide.load17 to <16 x i32>
880 %tmp13 = mul nuw nsw <16 x i32> %tmp12, %tmp8
881 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
882 %tmp15 = bitcast i32* %tmp14 to <16 x i32>*
883 store <16 x i32> %tmp13, <16 x i32>* %tmp15, align 4
  ret void
}
887 ; %val1 = load <2 x i8>
888 ; %op1 = sext<2 x i32> %val1
889 ; %val2 = load <2 x i8>
890 ; %op2 = sext<2 x i32> %val2
891 ; %rst = mul <2 x i32> %op1, %op2
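; Sign-extended i8 inputs: SSE2 sign-extends to i16 (punpcklbw + psraw $8), multiplies with
; pmullw and sign-extends the 16-bit products with punpcklwd + psrad $16; AVX uses
; vpmovsxbd + vpmulld.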
893 define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
894 ; X86-SSE-LABEL: mul_2xi8_sext:
895 ; X86-SSE: # %bb.0: # %entry
896 ; X86-SSE-NEXT: pushl %esi
897 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
898 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
899 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
900 ; X86-SSE-NEXT: movl c, %esi
901 ; X86-SSE-NEXT: movzwl (%edx,%ecx), %edx
902 ; X86-SSE-NEXT: movd %edx, %xmm0
903 ; X86-SSE-NEXT: movzwl (%eax,%ecx), %eax
904 ; X86-SSE-NEXT: movd %eax, %xmm1
905 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
906 ; X86-SSE-NEXT: psraw $8, %xmm0
907 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
908 ; X86-SSE-NEXT: psraw $8, %xmm1
909 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
910 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
911 ; X86-SSE-NEXT: psrad $16, %xmm0
912 ; X86-SSE-NEXT: movq %xmm0, (%esi,%ecx,4)
913 ; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
916 ; X86-AVX-LABEL: mul_2xi8_sext:
917 ; X86-AVX: # %bb.0: # %entry
918 ; X86-AVX-NEXT: pushl %esi
919 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
920 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
921 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
922 ; X86-AVX-NEXT: movl c, %esi
923 ; X86-AVX-NEXT: movzwl (%edx,%ecx), %edx
924 ; X86-AVX-NEXT: vmovd %edx, %xmm0
925 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
926 ; X86-AVX-NEXT: movzwl (%eax,%ecx), %eax
927 ; X86-AVX-NEXT: vmovd %eax, %xmm1
928 ; X86-AVX-NEXT: vpmovsxbd %xmm1, %xmm1
929 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
930 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
931 ; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
934 ; X64-SSE-LABEL: mul_2xi8_sext:
935 ; X64-SSE: # %bb.0: # %entry
936 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
937 ; X64-SSE-NEXT: movzwl (%rdi,%rdx), %ecx
938 ; X64-SSE-NEXT: movd %ecx, %xmm0
939 ; X64-SSE-NEXT: movzwl (%rsi,%rdx), %ecx
940 ; X64-SSE-NEXT: movd %ecx, %xmm1
941 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
942 ; X64-SSE-NEXT: psraw $8, %xmm0
943 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
944 ; X64-SSE-NEXT: psraw $8, %xmm1
945 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
946 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
947 ; X64-SSE-NEXT: psrad $16, %xmm0
948 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rdx,4)
; X64-SSE-NEXT: retq
;
951 ; X64-AVX-LABEL: mul_2xi8_sext:
952 ; X64-AVX: # %bb.0: # %entry
953 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
954 ; X64-AVX-NEXT: movzwl (%rdi,%rdx), %ecx
955 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
956 ; X64-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
957 ; X64-AVX-NEXT: movzwl (%rsi,%rdx), %ecx
958 ; X64-AVX-NEXT: vmovd %ecx, %xmm1
959 ; X64-AVX-NEXT: vpmovsxbd %xmm1, %xmm1
960 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
961 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
; X64-AVX-NEXT: retq
entry:
964 %pre = load i32*, i32** @c
965 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
966 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
967 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
968 %tmp8 = sext <2 x i8> %wide.load to <2 x i32>
969 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
970 %tmp11 = bitcast i8* %tmp10 to <2 x i8>*
971 %wide.load17 = load <2 x i8>, <2 x i8>* %tmp11, align 1
972 %tmp12 = sext <2 x i8> %wide.load17 to <2 x i32>
973 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
974 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
975 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
976 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
  ret void
}
980 ; %val1 = load <2 x i8>
981 ; %op1 = sext<2 x i32> %val1
982 ; %val2 = load <2 x i8>
983 ; %op2 = zext<2 x i32> %val2
984 ; %rst = mul <2 x i32> %op1, %op2
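; Mixed extensions: the sign-extended operand is widened with punpcklbw + psraw $8, the
; zero-extended one with punpcklbw against zero, and SSE2 rebuilds the 32-bit product from
; pmullw/pmulhw; AVX feeds vpmovsxbd and vpmovzxbd results into vpmulld.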
986 define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
987 ; X86-SSE-LABEL: mul_2xi8_sext_zext:
988 ; X86-SSE: # %bb.0: # %entry
989 ; X86-SSE-NEXT: pushl %esi
990 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
991 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
992 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
993 ; X86-SSE-NEXT: movl c, %esi
994 ; X86-SSE-NEXT: movzwl (%edx,%ecx), %edx
995 ; X86-SSE-NEXT: movd %edx, %xmm0
996 ; X86-SSE-NEXT: movzwl (%eax,%ecx), %eax
997 ; X86-SSE-NEXT: movd %eax, %xmm1
998 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
999 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
1000 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1001 ; X86-SSE-NEXT: psraw $8, %xmm0
1002 ; X86-SSE-NEXT: movdqa %xmm1, %xmm2
1003 ; X86-SSE-NEXT: pmulhw %xmm0, %xmm2
1004 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1005 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1006 ; X86-SSE-NEXT: movq %xmm0, (%esi,%ecx,4)
1007 ; X86-SSE-NEXT: popl %esi
1008 ; X86-SSE-NEXT: retl
1010 ; X86-AVX-LABEL: mul_2xi8_sext_zext:
1011 ; X86-AVX: # %bb.0: # %entry
1012 ; X86-AVX-NEXT: pushl %esi
1013 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1014 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1015 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
1016 ; X86-AVX-NEXT: movl c, %esi
1017 ; X86-AVX-NEXT: movzwl (%edx,%ecx), %edx
1018 ; X86-AVX-NEXT: vmovd %edx, %xmm0
1019 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1020 ; X86-AVX-NEXT: movzwl (%eax,%ecx), %eax
1021 ; X86-AVX-NEXT: vmovd %eax, %xmm1
1022 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
1023 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1024 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
1025 ; X86-AVX-NEXT: popl %esi
1026 ; X86-AVX-NEXT: retl
1028 ; X64-SSE-LABEL: mul_2xi8_sext_zext:
1029 ; X64-SSE: # %bb.0: # %entry
1030 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1031 ; X64-SSE-NEXT: movzwl (%rdi,%rdx), %ecx
1032 ; X64-SSE-NEXT: movd %ecx, %xmm0
1033 ; X64-SSE-NEXT: movzwl (%rsi,%rdx), %ecx
1034 ; X64-SSE-NEXT: movd %ecx, %xmm1
1035 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
1036 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
1037 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1038 ; X64-SSE-NEXT: psraw $8, %xmm0
1039 ; X64-SSE-NEXT: movdqa %xmm1, %xmm2
1040 ; X64-SSE-NEXT: pmulhw %xmm0, %xmm2
1041 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1042 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1043 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rdx,4)
1044 ; X64-SSE-NEXT: retq
1046 ; X64-AVX-LABEL: mul_2xi8_sext_zext:
1047 ; X64-AVX: # %bb.0: # %entry
1048 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1049 ; X64-AVX-NEXT: movzwl (%rdi,%rdx), %ecx
1050 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1051 ; X64-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1052 ; X64-AVX-NEXT: movzwl (%rsi,%rdx), %ecx
1053 ; X64-AVX-NEXT: vmovd %ecx, %xmm1
1054 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
1055 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1056 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
1057 ; X64-AVX-NEXT: retq
entry:
1059 %pre = load i32*, i32** @c
1060 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1061 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1062 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1063 %tmp8 = sext <2 x i8> %wide.load to <2 x i32>
1064 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
1065 %tmp11 = bitcast i8* %tmp10 to <2 x i8>*
1066 %wide.load17 = load <2 x i8>, <2 x i8>* %tmp11, align 1
1067 %tmp12 = zext <2 x i8> %wide.load17 to <2 x i32>
1068 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
1069 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1070 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1071 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
  ret void
}
1075 ; %val1 = load <2 x i16>
1076 ; %op1 = sext<2 x i32> %val1
1077 ; %val2 = load <2 x i16>
1078 ; %op2 = sext<2 x i32> %val2
1079 ; %rst = mul <2 x i32> %op1, %op2
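; Sign-extended i16 inputs: SSE2 rebuilds the 32-bit product from pmullw and pmulhw
; (signed high half); AVX uses vpmovsxwd + vpmulld.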
1081 define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
1082 ; X86-SSE-LABEL: mul_2xi16_sext:
1083 ; X86-SSE: # %bb.0: # %entry
1084 ; X86-SSE-NEXT: pushl %esi
1085 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1086 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1087 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
1088 ; X86-SSE-NEXT: movl c, %esi
1089 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1090 ; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1091 ; X86-SSE-NEXT: movdqa %xmm1, %xmm2
1092 ; X86-SSE-NEXT: pmulhw %xmm0, %xmm2
1093 ; X86-SSE-NEXT: pmullw %xmm0, %xmm1
1094 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1095 ; X86-SSE-NEXT: movq %xmm1, (%esi,%ecx,4)
1096 ; X86-SSE-NEXT: popl %esi
1097 ; X86-SSE-NEXT: retl
1099 ; X86-AVX-LABEL: mul_2xi16_sext:
1100 ; X86-AVX: # %bb.0: # %entry
1101 ; X86-AVX-NEXT: pushl %esi
1102 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1103 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1104 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
1105 ; X86-AVX-NEXT: movl c, %esi
1106 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1107 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1108 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1109 ; X86-AVX-NEXT: vpmovsxwd %xmm1, %xmm1
1110 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1111 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
1112 ; X86-AVX-NEXT: popl %esi
1113 ; X86-AVX-NEXT: retl
1115 ; X64-SSE-LABEL: mul_2xi16_sext:
1116 ; X64-SSE: # %bb.0: # %entry
1117 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1118 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1119 ; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1120 ; X64-SSE-NEXT: movdqa %xmm1, %xmm2
1121 ; X64-SSE-NEXT: pmulhw %xmm0, %xmm2
1122 ; X64-SSE-NEXT: pmullw %xmm0, %xmm1
1123 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1124 ; X64-SSE-NEXT: movq %xmm1, (%rax,%rdx,4)
1125 ; X64-SSE-NEXT: retq
1127 ; X64-AVX-LABEL: mul_2xi16_sext:
1128 ; X64-AVX: # %bb.0: # %entry
1129 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1130 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1131 ; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1132 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1133 ; X64-AVX-NEXT: vpmovsxwd %xmm1, %xmm1
1134 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1135 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
1136 ; X64-AVX-NEXT: retq
1137 entry:
1138 %pre = load i32*, i32** @c
1139 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1140 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
1141 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
1142 %tmp8 = sext <2 x i16> %wide.load to <2 x i32>
1143 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
1144 %tmp11 = bitcast i8* %tmp10 to <2 x i16>*
1145 %wide.load17 = load <2 x i16>, <2 x i16>* %tmp11, align 1
1146 %tmp12 = sext <2 x i16> %wide.load17 to <2 x i32>
1147 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
1148 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1149 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1150 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1151 ret void
1152 }
1154 ; %val1 = load <2 x i16>
1155 ; %op1 = sext<2 x i32> %val1
1156 ; %val2 = load <2 x i16>
1157 ; %op2 = zext<2 x i32> %val2
1158 ; %rst = mul <2 x i32> %op1, %op2
1160 define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
1161 ; X86-SSE-LABEL: mul_2xi16_sext_zext:
1162 ; X86-SSE: # %bb.0: # %entry
1163 ; X86-SSE-NEXT: pushl %esi
1164 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1165 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1166 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
1167 ; X86-SSE-NEXT: movl c, %esi
1168 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1169 ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
1170 ; X86-SSE-NEXT: psrad $16, %xmm0
1171 ; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1172 ; X86-SSE-NEXT: pxor %xmm2, %xmm2
1173 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1174 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
1175 ; X86-SSE-NEXT: pmuludq %xmm0, %xmm1
1176 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1177 ; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
1178 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1179 ; X86-SSE-NEXT: movq %xmm1, (%esi,%ecx,4)
1180 ; X86-SSE-NEXT: popl %esi
1181 ; X86-SSE-NEXT: retl
1183 ; X86-AVX-LABEL: mul_2xi16_sext_zext:
1184 ; X86-AVX: # %bb.0: # %entry
1185 ; X86-AVX-NEXT: pushl %esi
1186 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1187 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1188 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
1189 ; X86-AVX-NEXT: movl c, %esi
1190 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1191 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1192 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1193 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
1194 ; X86-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1195 ; X86-AVX-NEXT: vmovq %xmm0, (%esi,%ecx,4)
1196 ; X86-AVX-NEXT: popl %esi
1197 ; X86-AVX-NEXT: retl
1199 ; X64-SSE-LABEL: mul_2xi16_sext_zext:
1200 ; X64-SSE: # %bb.0: # %entry
1201 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1202 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1203 ; X64-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
1204 ; X64-SSE-NEXT: psrad $16, %xmm0
1205 ; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1206 ; X64-SSE-NEXT: pxor %xmm2, %xmm2
1207 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1208 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
1209 ; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
1210 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1211 ; X64-SSE-NEXT: pmuludq %xmm2, %xmm0
1212 ; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1213 ; X64-SSE-NEXT: movq %xmm1, (%rax,%rdx,4)
1214 ; X64-SSE-NEXT: retq
1216 ; X64-AVX-LABEL: mul_2xi16_sext_zext:
1217 ; X64-AVX: # %bb.0: # %entry
1218 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1219 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1220 ; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1221 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1222 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
1223 ; X64-AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm0
1224 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rdx,4)
1225 ; X64-AVX-NEXT: retq
1226 entry:
1227 %pre = load i32*, i32** @c
1228 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1229 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
1230 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
1231 %tmp8 = sext <2 x i16> %wide.load to <2 x i32>
1232 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
1233 %tmp11 = bitcast i8* %tmp10 to <2 x i16>*
1234 %wide.load17 = load <2 x i16>, <2 x i16>* %tmp11, align 1
1235 %tmp12 = zext <2 x i16> %wide.load17 to <2 x i32>
1236 %tmp13 = mul nuw nsw <2 x i32> %tmp12, %tmp8
1237 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1238 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1239 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1240 ret void
1241 }
1243 ; %val1 = load <16 x i16>
1244 ; %op1 = sext<16 x i32> %val1
1245 ; %val2 = load <16 x i16>
1246 ; %op2 = sext<16 x i32> %val2
1247 ; %rst = mul <16 x i32> %op1, %op2
1249 define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
1250 ; X86-SSE-LABEL: mul_16xi16_sext:
1251 ; X86-SSE: # %bb.0: # %entry
1252 ; X86-SSE-NEXT: pushl %esi
1253 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1254 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1255 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
1256 ; X86-SSE-NEXT: movl c, %esi
1257 ; X86-SSE-NEXT: movdqu (%edx,%ecx), %xmm0
1258 ; X86-SSE-NEXT: movdqu 16(%edx,%ecx), %xmm1
1259 ; X86-SSE-NEXT: movdqu (%eax,%ecx), %xmm2
1260 ; X86-SSE-NEXT: movdqu 16(%eax,%ecx), %xmm3
1261 ; X86-SSE-NEXT: movdqa %xmm2, %xmm4
1262 ; X86-SSE-NEXT: pmulhw %xmm0, %xmm4
1263 ; X86-SSE-NEXT: pmullw %xmm0, %xmm2
1264 ; X86-SSE-NEXT: movdqa %xmm2, %xmm0
1265 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
1266 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
1267 ; X86-SSE-NEXT: movdqa %xmm3, %xmm4
1268 ; X86-SSE-NEXT: pmulhw %xmm1, %xmm4
1269 ; X86-SSE-NEXT: pmullw %xmm1, %xmm3
1270 ; X86-SSE-NEXT: movdqa %xmm3, %xmm1
1271 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
1272 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
1273 ; X86-SSE-NEXT: movdqu %xmm3, 48(%esi,%ecx,4)
1274 ; X86-SSE-NEXT: movdqu %xmm1, 32(%esi,%ecx,4)
1275 ; X86-SSE-NEXT: movdqu %xmm2, 16(%esi,%ecx,4)
1276 ; X86-SSE-NEXT: movdqu %xmm0, (%esi,%ecx,4)
1277 ; X86-SSE-NEXT: popl %esi
1278 ; X86-SSE-NEXT: retl
1280 ; X86-AVX1-LABEL: mul_16xi16_sext:
1281 ; X86-AVX1: # %bb.0: # %entry
1282 ; X86-AVX1-NEXT: pushl %esi
1283 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
1284 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
1285 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
1286 ; X86-AVX1-NEXT: movl c, %esi
1287 ; X86-AVX1-NEXT: vpmovsxwd 16(%edx,%ecx), %xmm0
1288 ; X86-AVX1-NEXT: vpmovsxwd 24(%edx,%ecx), %xmm1
1289 ; X86-AVX1-NEXT: vpmovsxwd (%edx,%ecx), %xmm2
1290 ; X86-AVX1-NEXT: vpmovsxwd 8(%edx,%ecx), %xmm3
1291 ; X86-AVX1-NEXT: vpmovsxwd 16(%eax,%ecx), %xmm4
1292 ; X86-AVX1-NEXT: vpmulld %xmm0, %xmm4, %xmm0
1293 ; X86-AVX1-NEXT: vpmovsxwd 24(%eax,%ecx), %xmm4
1294 ; X86-AVX1-NEXT: vpmulld %xmm1, %xmm4, %xmm1
1295 ; X86-AVX1-NEXT: vpmovsxwd (%eax,%ecx), %xmm4
1296 ; X86-AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
1297 ; X86-AVX1-NEXT: vpmovsxwd 8(%eax,%ecx), %xmm4
1298 ; X86-AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3
1299 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
1300 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
1301 ; X86-AVX1-NEXT: vmovups %ymm0, 32(%esi,%ecx,4)
1302 ; X86-AVX1-NEXT: vmovups %ymm2, (%esi,%ecx,4)
1303 ; X86-AVX1-NEXT: popl %esi
1304 ; X86-AVX1-NEXT: vzeroupper
1305 ; X86-AVX1-NEXT: retl
1307 ; X86-AVX2-LABEL: mul_16xi16_sext:
1308 ; X86-AVX2: # %bb.0: # %entry
1309 ; X86-AVX2-NEXT: pushl %esi
1310 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
1311 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
1312 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %edx
1313 ; X86-AVX2-NEXT: movl c, %esi
1314 ; X86-AVX2-NEXT: vpmovsxwd 16(%edx,%ecx), %ymm0
1315 ; X86-AVX2-NEXT: vpmovsxwd (%edx,%ecx), %ymm1
1316 ; X86-AVX2-NEXT: vpmovsxwd 16(%eax,%ecx), %ymm2
1317 ; X86-AVX2-NEXT: vpmulld %ymm0, %ymm2, %ymm0
1318 ; X86-AVX2-NEXT: vpmovsxwd (%eax,%ecx), %ymm2
1319 ; X86-AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
1320 ; X86-AVX2-NEXT: vmovdqu %ymm0, 32(%esi,%ecx,4)
1321 ; X86-AVX2-NEXT: vmovdqu %ymm1, (%esi,%ecx,4)
1322 ; X86-AVX2-NEXT: popl %esi
1323 ; X86-AVX2-NEXT: vzeroupper
1324 ; X86-AVX2-NEXT: retl
1326 ; X64-SSE-LABEL: mul_16xi16_sext:
1327 ; X64-SSE: # %bb.0: # %entry
1328 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1329 ; X64-SSE-NEXT: movdqu (%rdi,%rdx), %xmm0
1330 ; X64-SSE-NEXT: movdqu 16(%rdi,%rdx), %xmm1
1331 ; X64-SSE-NEXT: movdqu (%rsi,%rdx), %xmm2
1332 ; X64-SSE-NEXT: movdqu 16(%rsi,%rdx), %xmm3
1333 ; X64-SSE-NEXT: movdqa %xmm2, %xmm4
1334 ; X64-SSE-NEXT: pmulhw %xmm0, %xmm4
1335 ; X64-SSE-NEXT: pmullw %xmm0, %xmm2
1336 ; X64-SSE-NEXT: movdqa %xmm2, %xmm0
1337 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
1338 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
1339 ; X64-SSE-NEXT: movdqa %xmm3, %xmm4
1340 ; X64-SSE-NEXT: pmulhw %xmm1, %xmm4
1341 ; X64-SSE-NEXT: pmullw %xmm1, %xmm3
1342 ; X64-SSE-NEXT: movdqa %xmm3, %xmm1
1343 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
1344 ; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
1345 ; X64-SSE-NEXT: movdqu %xmm3, 48(%rax,%rdx,4)
1346 ; X64-SSE-NEXT: movdqu %xmm1, 32(%rax,%rdx,4)
1347 ; X64-SSE-NEXT: movdqu %xmm2, 16(%rax,%rdx,4)
1348 ; X64-SSE-NEXT: movdqu %xmm0, (%rax,%rdx,4)
1349 ; X64-SSE-NEXT: retq
1351 ; X64-AVX1-LABEL: mul_16xi16_sext:
1352 ; X64-AVX1: # %bb.0: # %entry
1353 ; X64-AVX1-NEXT: movq {{.*}}(%rip), %rax
1354 ; X64-AVX1-NEXT: vpmovsxwd 16(%rdi,%rdx), %xmm0
1355 ; X64-AVX1-NEXT: vpmovsxwd 24(%rdi,%rdx), %xmm1
1356 ; X64-AVX1-NEXT: vpmovsxwd (%rdi,%rdx), %xmm2
1357 ; X64-AVX1-NEXT: vpmovsxwd 8(%rdi,%rdx), %xmm3
1358 ; X64-AVX1-NEXT: vpmovsxwd 16(%rsi,%rdx), %xmm4
1359 ; X64-AVX1-NEXT: vpmulld %xmm0, %xmm4, %xmm0
1360 ; X64-AVX1-NEXT: vpmovsxwd 24(%rsi,%rdx), %xmm4
1361 ; X64-AVX1-NEXT: vpmulld %xmm1, %xmm4, %xmm1
1362 ; X64-AVX1-NEXT: vpmovsxwd (%rsi,%rdx), %xmm4
1363 ; X64-AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
1364 ; X64-AVX1-NEXT: vpmovsxwd 8(%rsi,%rdx), %xmm4
1365 ; X64-AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3
1366 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
1367 ; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
1368 ; X64-AVX1-NEXT: vmovups %ymm0, 32(%rax,%rdx,4)
1369 ; X64-AVX1-NEXT: vmovups %ymm2, (%rax,%rdx,4)
1370 ; X64-AVX1-NEXT: vzeroupper
1371 ; X64-AVX1-NEXT: retq
1373 ; X64-AVX2-LABEL: mul_16xi16_sext:
1374 ; X64-AVX2: # %bb.0: # %entry
1375 ; X64-AVX2-NEXT: movq {{.*}}(%rip), %rax
1376 ; X64-AVX2-NEXT: vpmovsxwd 16(%rdi,%rdx), %ymm0
1377 ; X64-AVX2-NEXT: vpmovsxwd (%rdi,%rdx), %ymm1
1378 ; X64-AVX2-NEXT: vpmovsxwd 16(%rsi,%rdx), %ymm2
1379 ; X64-AVX2-NEXT: vpmulld %ymm0, %ymm2, %ymm0
1380 ; X64-AVX2-NEXT: vpmovsxwd (%rsi,%rdx), %ymm2
1381 ; X64-AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
1382 ; X64-AVX2-NEXT: vmovdqu %ymm0, 32(%rax,%rdx,4)
1383 ; X64-AVX2-NEXT: vmovdqu %ymm1, (%rax,%rdx,4)
1384 ; X64-AVX2-NEXT: vzeroupper
1385 ; X64-AVX2-NEXT: retq
1386 entry:
1387 %pre = load i32*, i32** @c
1388 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1389 %tmp7 = bitcast i8* %tmp6 to <16 x i16>*
1390 %wide.load = load <16 x i16>, <16 x i16>* %tmp7, align 1
1391 %tmp8 = sext <16 x i16> %wide.load to <16 x i32>
1392 %tmp10 = getelementptr inbounds i8, i8* %b, i64 %index
1393 %tmp11 = bitcast i8* %tmp10 to <16 x i16>*
1394 %wide.load17 = load <16 x i16>, <16 x i16>* %tmp11, align 1
1395 %tmp12 = sext <16 x i16> %wide.load17 to <16 x i32>
1396 %tmp13 = mul nuw nsw <16 x i32> %tmp12, %tmp8
1397 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1398 %tmp15 = bitcast i32* %tmp14 to <16 x i32>*
1399 store <16 x i32> %tmp13, <16 x i32>* %tmp15, align 4
1400 ret void
1401 }
1403 ; %val = load <2 x i8>
1404 ; %op1 = zext<2 x i32> %val
1405 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (0 ~ 255)
1406 ; %rst = mul <2 x i32> %op1, %op2
1408 define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
1409 ; X86-SSE-LABEL: mul_2xi8_varconst1:
1410 ; X86-SSE: # %bb.0: # %entry
1411 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1412 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1413 ; X86-SSE-NEXT: movl c, %edx
1414 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1415 ; X86-SSE-NEXT: movd %ecx, %xmm0
1416 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
1417 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1418 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1419 ; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
1420 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1421 ; X86-SSE-NEXT: retl
1423 ; X86-AVX-LABEL: mul_2xi8_varconst1:
1424 ; X86-AVX: # %bb.0: # %entry
1425 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1426 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1427 ; X86-AVX-NEXT: movl c, %edx
1428 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1429 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1430 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1431 ; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
1432 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1433 ; X86-AVX-NEXT: retl
1435 ; X64-SSE-LABEL: mul_2xi8_varconst1:
1436 ; X64-SSE: # %bb.0: # %entry
1437 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1438 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1439 ; X64-SSE-NEXT: movd %ecx, %xmm0
1440 ; X64-SSE-NEXT: pxor %xmm1, %xmm1
1441 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1442 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1443 ; X64-SSE-NEXT: pmaddwd {{.*}}(%rip), %xmm0
1444 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1445 ; X64-SSE-NEXT: retq
1447 ; X64-AVX-LABEL: mul_2xi8_varconst1:
1448 ; X64-AVX: # %bb.0: # %entry
1449 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1450 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1451 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1452 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1453 ; X64-AVX-NEXT: vpmaddwd {{.*}}(%rip), %xmm0, %xmm0
1454 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1455 ; X64-AVX-NEXT: retq
1456 entry:
1457 %pre = load i32*, i32** @c
1458 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1459 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1460 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1461 %tmp8 = zext <2 x i8> %wide.load to <2 x i32>
1462 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 0, i32 255>
1463 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1464 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1465 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1466 ret void
1467 }
1469 ; %val = load <2 x i8>
1470 ; %op1 = sext<2 x i32> %val
1471 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (-128 ~ 127)
1472 ; %rst = mul <2 x i32> %op1, %op2
1474 define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
1475 ; X86-SSE-LABEL: mul_2xi8_varconst2:
1476 ; X86-SSE: # %bb.0: # %entry
1477 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1478 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1479 ; X86-SSE-NEXT: movl c, %edx
1480 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1481 ; X86-SSE-NEXT: movd %ecx, %xmm0
1482 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1483 ; X86-SSE-NEXT: psraw $8, %xmm0
1484 ; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
1485 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1486 ; X86-SSE-NEXT: psrad $16, %xmm0
1487 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1488 ; X86-SSE-NEXT: retl
1490 ; X86-AVX-LABEL: mul_2xi8_varconst2:
1491 ; X86-AVX: # %bb.0: # %entry
1492 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1493 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1494 ; X86-AVX-NEXT: movl c, %edx
1495 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1496 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1497 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1498 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1499 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1500 ; X86-AVX-NEXT: retl
1502 ; X64-SSE-LABEL: mul_2xi8_varconst2:
1503 ; X64-SSE: # %bb.0: # %entry
1504 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1505 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1506 ; X64-SSE-NEXT: movd %ecx, %xmm0
1507 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1508 ; X64-SSE-NEXT: psraw $8, %xmm0
1509 ; X64-SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
1510 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1511 ; X64-SSE-NEXT: psrad $16, %xmm0
1512 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1513 ; X64-SSE-NEXT: retq
1515 ; X64-AVX-LABEL: mul_2xi8_varconst2:
1516 ; X64-AVX: # %bb.0: # %entry
1517 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1518 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1519 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1520 ; X64-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1521 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1522 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1523 ; X64-AVX-NEXT: retq
1524 entry:
1525 %pre = load i32*, i32** @c
1526 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1527 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1528 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1529 %tmp8 = sext <2 x i8> %wide.load to <2 x i32>
1530 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 -128, i32 127>
1531 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1532 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1533 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1534 ret void
1535 }
1537 ; %val = load <2 x i8>
1538 ; %op1 = zext<2 x i32> %val
1539 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (0 ~ 256)
1540 ; %rst = mul <2 x i32> %op1, %op2
1542 define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
1543 ; X86-SSE-LABEL: mul_2xi8_varconst3:
1544 ; X86-SSE: # %bb.0: # %entry
1545 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1546 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1547 ; X86-SSE-NEXT: movl c, %edx
1548 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1549 ; X86-SSE-NEXT: movd %ecx, %xmm0
1550 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
1551 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1552 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1553 ; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
1554 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1555 ; X86-SSE-NEXT: retl
1557 ; X86-AVX-LABEL: mul_2xi8_varconst3:
1558 ; X86-AVX: # %bb.0: # %entry
1559 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1560 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1561 ; X86-AVX-NEXT: movl c, %edx
1562 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1563 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1564 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1565 ; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
1566 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1567 ; X86-AVX-NEXT: retl
1569 ; X64-SSE-LABEL: mul_2xi8_varconst3:
1570 ; X64-SSE: # %bb.0: # %entry
1571 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1572 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1573 ; X64-SSE-NEXT: movd %ecx, %xmm0
1574 ; X64-SSE-NEXT: pxor %xmm1, %xmm1
1575 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1576 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1577 ; X64-SSE-NEXT: pmaddwd {{.*}}(%rip), %xmm0
1578 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1579 ; X64-SSE-NEXT: retq
1581 ; X64-AVX-LABEL: mul_2xi8_varconst3:
1582 ; X64-AVX: # %bb.0: # %entry
1583 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1584 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1585 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1586 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1587 ; X64-AVX-NEXT: vpmaddwd {{.*}}(%rip), %xmm0, %xmm0
1588 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1589 ; X64-AVX-NEXT: retq
1590 entry:
1591 %pre = load i32*, i32** @c
1592 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1593 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1594 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1595 %tmp8 = zext <2 x i8> %wide.load to <2 x i32>
1596 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 0, i32 256>
1597 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1598 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1599 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1600 ret void
1601 }
1603 ; %val = load <2 x i8>
1604 ; %op1 = zext<2 x i32> %val
1605 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (-1 ~ 255)
1606 ; %rst = mul <2 x i32> %op1, %op2
1608 define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
1609 ; X86-SSE-LABEL: mul_2xi8_varconst4:
1610 ; X86-SSE: # %bb.0: # %entry
1611 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1612 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1613 ; X86-SSE-NEXT: movl c, %edx
1614 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1615 ; X86-SSE-NEXT: movd %ecx, %xmm0
1616 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
1617 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1618 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65535,255,u,u,u,u,u,u>
1619 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2
1620 ; X86-SSE-NEXT: pmulhw %xmm1, %xmm2
1621 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1622 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1623 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1624 ; X86-SSE-NEXT: retl
1626 ; X86-AVX-LABEL: mul_2xi8_varconst4:
1627 ; X86-AVX: # %bb.0: # %entry
1628 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1629 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1630 ; X86-AVX-NEXT: movl c, %edx
1631 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1632 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1633 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1634 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1635 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1636 ; X86-AVX-NEXT: retl
1638 ; X64-SSE-LABEL: mul_2xi8_varconst4:
1639 ; X64-SSE: # %bb.0: # %entry
1640 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1641 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1642 ; X64-SSE-NEXT: movd %ecx, %xmm0
1643 ; X64-SSE-NEXT: pxor %xmm1, %xmm1
1644 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1645 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65535,255,u,u,u,u,u,u>
1646 ; X64-SSE-NEXT: movdqa %xmm0, %xmm2
1647 ; X64-SSE-NEXT: pmulhw %xmm1, %xmm2
1648 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1649 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1650 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1651 ; X64-SSE-NEXT: retq
1653 ; X64-AVX-LABEL: mul_2xi8_varconst4:
1654 ; X64-AVX: # %bb.0: # %entry
1655 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1656 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1657 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1658 ; X64-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1659 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1660 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1661 ; X64-AVX-NEXT: retq
1662 entry:
1663 %pre = load i32*, i32** @c
1664 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1665 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1666 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1667 %tmp8 = zext <2 x i8> %wide.load to <2 x i32>
1668 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 -1, i32 255>
1669 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1670 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1671 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1672 ret void
1673 }
1675 ; %val = load <2 x i8>
1676 ; %op1 = sext<2 x i32> %val
1677 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (-129 ~ 127)
1678 ; %rst = mul <2 x i32> %op1, %op2
1680 define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
1681 ; X86-SSE-LABEL: mul_2xi8_varconst5:
1682 ; X86-SSE: # %bb.0: # %entry
1683 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1684 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1685 ; X86-SSE-NEXT: movl c, %edx
1686 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1687 ; X86-SSE-NEXT: movd %ecx, %xmm0
1688 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1689 ; X86-SSE-NEXT: psraw $8, %xmm0
1690 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65407,127,u,u,u,u,u,u>
1691 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2
1692 ; X86-SSE-NEXT: pmulhw %xmm1, %xmm2
1693 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1694 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1695 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1696 ; X86-SSE-NEXT: retl
1698 ; X86-AVX-LABEL: mul_2xi8_varconst5:
1699 ; X86-AVX: # %bb.0: # %entry
1700 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1701 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1702 ; X86-AVX-NEXT: movl c, %edx
1703 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1704 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1705 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1706 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1707 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1708 ; X86-AVX-NEXT: retl
1710 ; X64-SSE-LABEL: mul_2xi8_varconst5:
1711 ; X64-SSE: # %bb.0: # %entry
1712 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1713 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1714 ; X64-SSE-NEXT: movd %ecx, %xmm0
1715 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1716 ; X64-SSE-NEXT: psraw $8, %xmm0
1717 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65407,127,u,u,u,u,u,u>
1718 ; X64-SSE-NEXT: movdqa %xmm0, %xmm2
1719 ; X64-SSE-NEXT: pmulhw %xmm1, %xmm2
1720 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1721 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1722 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1723 ; X64-SSE-NEXT: retq
1725 ; X64-AVX-LABEL: mul_2xi8_varconst5:
1726 ; X64-AVX: # %bb.0: # %entry
1727 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1728 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1729 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1730 ; X64-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1731 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1732 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1733 ; X64-AVX-NEXT: retq
1734 entry:
1735 %pre = load i32*, i32** @c
1736 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1737 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1738 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1739 %tmp8 = sext <2 x i8> %wide.load to <2 x i32>
1740 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 -129, i32 127>
1741 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1742 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1743 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1744 ret void
1745 }
1747 ; %val = load <2 x i8>
1748 ; %op1 = sext<2 x i32> %val
1749 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (-128 ~ 128)
1750 ; %rst = mul <2 x i32> %op1, %op2
1752 define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
1753 ; X86-SSE-LABEL: mul_2xi8_varconst6:
1754 ; X86-SSE: # %bb.0: # %entry
1755 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1756 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1757 ; X86-SSE-NEXT: movl c, %edx
1758 ; X86-SSE-NEXT: movzwl (%ecx,%eax), %ecx
1759 ; X86-SSE-NEXT: movd %ecx, %xmm0
1760 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1761 ; X86-SSE-NEXT: psraw $8, %xmm0
1762 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65408,128,u,u,u,u,u,u>
1763 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2
1764 ; X86-SSE-NEXT: pmulhw %xmm1, %xmm2
1765 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1766 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1767 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1768 ; X86-SSE-NEXT: retl
1770 ; X86-AVX-LABEL: mul_2xi8_varconst6:
1771 ; X86-AVX: # %bb.0: # %entry
1772 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1773 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1774 ; X86-AVX-NEXT: movl c, %edx
1775 ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
1776 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
1777 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1778 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1779 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1780 ; X86-AVX-NEXT: retl
1782 ; X64-SSE-LABEL: mul_2xi8_varconst6:
1783 ; X64-SSE: # %bb.0: # %entry
1784 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1785 ; X64-SSE-NEXT: movzwl (%rdi,%rsi), %ecx
1786 ; X64-SSE-NEXT: movd %ecx, %xmm0
1787 ; X64-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1788 ; X64-SSE-NEXT: psraw $8, %xmm0
1789 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <65408,128,u,u,u,u,u,u>
1790 ; X64-SSE-NEXT: movdqa %xmm0, %xmm2
1791 ; X64-SSE-NEXT: pmulhw %xmm1, %xmm2
1792 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1793 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1794 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1795 ; X64-SSE-NEXT: retq
1797 ; X64-AVX-LABEL: mul_2xi8_varconst6:
1798 ; X64-AVX: # %bb.0: # %entry
1799 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1800 ; X64-AVX-NEXT: movzwl (%rdi,%rsi), %ecx
1801 ; X64-AVX-NEXT: vmovd %ecx, %xmm0
1802 ; X64-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
1803 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1804 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1805 ; X64-AVX-NEXT: retq
1806 entry:
1807 %pre = load i32*, i32** @c
1808 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1809 %tmp7 = bitcast i8* %tmp6 to <2 x i8>*
1810 %wide.load = load <2 x i8>, <2 x i8>* %tmp7, align 1
1811 %tmp8 = sext <2 x i8> %wide.load to <2 x i32>
1812 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 -128, i32 128>
1813 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1814 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1815 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1816 ret void
1817 }
1819 ; %val = load <2 x i16>
1820 ; %op1 = zext<2 x i32> %val
1821 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (0 ~ 65535)
1822 ; %rst = mul <2 x i32> %op1, %op2
1824 define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
1825 ; X86-SSE-LABEL: mul_2xi16_varconst1:
1826 ; X86-SSE: # %bb.0: # %entry
1827 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1828 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1829 ; X86-SSE-NEXT: movl c, %edx
1830 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1831 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,65535,u,u,u,u,u,u>
1832 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2
1833 ; X86-SSE-NEXT: pmulhuw %xmm1, %xmm2
1834 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1835 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1836 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1837 ; X86-SSE-NEXT: retl
1839 ; X86-AVX-LABEL: mul_2xi16_varconst1:
1840 ; X86-AVX: # %bb.0: # %entry
1841 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1842 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1843 ; X86-AVX-NEXT: movl c, %edx
1844 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1845 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1846 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1847 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1848 ; X86-AVX-NEXT: retl
1850 ; X64-SSE-LABEL: mul_2xi16_varconst1:
1851 ; X64-SSE: # %bb.0: # %entry
1852 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1853 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1854 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,65535,u,u,u,u,u,u>
1855 ; X64-SSE-NEXT: movdqa %xmm0, %xmm2
1856 ; X64-SSE-NEXT: pmulhuw %xmm1, %xmm2
1857 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1858 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1859 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1860 ; X64-SSE-NEXT: retq
1862 ; X64-AVX-LABEL: mul_2xi16_varconst1:
1863 ; X64-AVX: # %bb.0: # %entry
1864 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1865 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1866 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1867 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1868 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1869 ; X64-AVX-NEXT: retq
1870 entry:
1871 %pre = load i32*, i32** @c
1872 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1873 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
1874 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
1875 %tmp8 = zext <2 x i16> %wide.load to <2 x i32>
1876 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 0, i32 65535>
1877 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1878 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1879 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1880 ret void
1881 }
1883 ; %val = load <2 x i16>
1884 ; %op1 = sext<2 x i32> %val
1885 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (-32768 ~ 32767)
1886 ; %rst = mul <2 x i32> %op1, %op2
1888 define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
1889 ; X86-SSE-LABEL: mul_2xi16_varconst2:
1890 ; X86-SSE: # %bb.0: # %entry
1891 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1892 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1893 ; X86-SSE-NEXT: movl c, %edx
1894 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1895 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
1896 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2
1897 ; X86-SSE-NEXT: pmulhw %xmm1, %xmm2
1898 ; X86-SSE-NEXT: pmullw %xmm1, %xmm0
1899 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1900 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1901 ; X86-SSE-NEXT: retl
1903 ; X86-AVX-LABEL: mul_2xi16_varconst2:
1904 ; X86-AVX: # %bb.0: # %entry
1905 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1906 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1907 ; X86-AVX-NEXT: movl c, %edx
1908 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1909 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1910 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1911 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1912 ; X86-AVX-NEXT: retl
1914 ; X64-SSE-LABEL: mul_2xi16_varconst2:
1915 ; X64-SSE: # %bb.0: # %entry
1916 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1917 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1918 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
1919 ; X64-SSE-NEXT: movdqa %xmm0, %xmm2
1920 ; X64-SSE-NEXT: pmulhw %xmm1, %xmm2
1921 ; X64-SSE-NEXT: pmullw %xmm1, %xmm0
1922 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1923 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1924 ; X64-SSE-NEXT: retq
1926 ; X64-AVX-LABEL: mul_2xi16_varconst2:
1927 ; X64-AVX: # %bb.0: # %entry
1928 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1929 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1930 ; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
1931 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
1932 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
1933 ; X64-AVX-NEXT: retq
1934 entry:
1935 %pre = load i32*, i32** @c
1936 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
1937 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
1938 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
1939 %tmp8 = sext <2 x i16> %wide.load to <2 x i32>
1940 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 -32768, i32 32767>
1941 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
1942 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
1943 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
1944 ret void
1945 }
1947 ; %val = load <2 x i16>
1948 ; %op1 = zext<2 x i32> %val
1949 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (0 ~ 65536)
1950 ; %rst = mul <2 x i32> %op1, %op2
1952 define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
1953 ; X86-SSE-LABEL: mul_2xi16_varconst3:
1954 ; X86-SSE: # %bb.0: # %entry
1955 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1956 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
1957 ; X86-SSE-NEXT: movl c, %edx
1958 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1959 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
1960 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1961 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,65536,u,u>
1962 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
1963 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm0
1964 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
1965 ; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
1966 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1967 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
1968 ; X86-SSE-NEXT: retl
1970 ; X86-AVX-LABEL: mul_2xi16_varconst3:
1971 ; X86-AVX: # %bb.0: # %entry
1972 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
1973 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
1974 ; X86-AVX-NEXT: movl c, %edx
1975 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1976 ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1977 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
1978 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
1979 ; X86-AVX-NEXT: retl
1981 ; X64-SSE-LABEL: mul_2xi16_varconst3:
1982 ; X64-SSE: # %bb.0: # %entry
1983 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
1984 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1985 ; X64-SSE-NEXT: pxor %xmm1, %xmm1
1986 ; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1987 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,65536,u,u>
1988 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
1989 ; X64-SSE-NEXT: pmuludq %xmm1, %xmm0
1990 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
1991 ; X64-SSE-NEXT: pmuludq %xmm2, %xmm1
1992 ; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1993 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
1994 ; X64-SSE-NEXT: retq
1996 ; X64-AVX-LABEL: mul_2xi16_varconst3:
1997 ; X64-AVX: # %bb.0: # %entry
1998 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
1999 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
2000 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
2001 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
2002 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
2003 ; X64-AVX-NEXT: retq
2004 entry:
2005 %pre = load i32*, i32** @c
2006 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
2007 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
2008 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
2009 %tmp8 = zext <2 x i16> %wide.load to <2 x i32>
2010 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 0, i32 65536>
2011 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
2012 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
2013 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
2014 ret void
2015 }
2017 ; %val = load <2 x i16>
2018 ; %op1 = sext<2 x i32> %val
2019 ; %op2 = const <2 x i32> {c1, c2} // c1 and c2 are within (0 ~ 32768)
2020 ; %rst = mul <2 x i32> %op1, %op2
2022 define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
2023 ; X86-SSE-LABEL: mul_2xi16_varconst4:
2024 ; X86-SSE: # %bb.0: # %entry
2025 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2026 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
2027 ; X86-SSE-NEXT: movl c, %edx
2028 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
2029 ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
2030 ; X86-SSE-NEXT: psrad $16, %xmm0
2031 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,32768,u,u>
2032 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
2033 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm0
2034 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
2035 ; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
2036 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2037 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
2038 ; X86-SSE-NEXT: retl
2040 ; X86-AVX-LABEL: mul_2xi16_varconst4:
2041 ; X86-AVX: # %bb.0: # %entry
2042 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
2043 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
2044 ; X86-AVX-NEXT: movl c, %edx
2045 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
2046 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
2047 ; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
2048 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
2049 ; X86-AVX-NEXT: retl
2051 ; X64-SSE-LABEL: mul_2xi16_varconst4:
2052 ; X64-SSE: # %bb.0: # %entry
2053 ; X64-SSE-NEXT: movq {{.*}}(%rip), %rax
2054 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
2055 ; X64-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
2056 ; X64-SSE-NEXT: psrad $16, %xmm0
2057 ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,32768,u,u>
2058 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
2059 ; X64-SSE-NEXT: pmuludq %xmm1, %xmm0
2060 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
2061 ; X64-SSE-NEXT: pmuludq %xmm2, %xmm1
2062 ; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2063 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
2064 ; X64-SSE-NEXT: retq
2066 ; X64-AVX-LABEL: mul_2xi16_varconst4:
2067 ; X64-AVX: # %bb.0: # %entry
2068 ; X64-AVX-NEXT: movq {{.*}}(%rip), %rax
2069 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
2070 ; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
2071 ; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
2072 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
2073 ; X64-AVX-NEXT: retq
2074 entry:
2075 %pre = load i32*, i32** @c
2076 %tmp6 = getelementptr inbounds i8, i8* %a, i64 %index
2077 %tmp7 = bitcast i8* %tmp6 to <2 x i16>*
2078 %wide.load = load <2 x i16>, <2 x i16>* %tmp7, align 1
2079 %tmp8 = sext <2 x i16> %wide.load to <2 x i32>
2080 %tmp13 = mul nuw nsw <2 x i32> %tmp8, <i32 0, i32 32768>
2081 %tmp14 = getelementptr inbounds i32, i32* %pre, i64 %index
2082 %tmp15 = bitcast i32* %tmp14 to <2 x i32>*
2083 store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
2084 ret void
2085 }
2091 define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
2092 ; X86-SSE-LABEL: PR34947:
2093 ; X86-SSE: # %bb.0:
2094 ; X86-SSE-NEXT: pushl %esi
2095 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
2096 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2097 ; X86-SSE-NEXT: movdqa (%eax), %xmm5
2098 ; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
2099 ; X86-SSE-NEXT: movdqa (%ecx), %xmm2
2100 ; X86-SSE-NEXT: movdqa 16(%ecx), %xmm6
2101 ; X86-SSE-NEXT: pxor %xmm0, %xmm0
2102 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2103 ; X86-SSE-NEXT: movdqa %xmm5, %xmm4
2104 ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
2105 ; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
2106 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
2107 ; X86-SSE-NEXT: movd %xmm0, %eax
2108 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3]
2109 ; X86-SSE-NEXT: movd %xmm0, %esi
2110 ; X86-SSE-NEXT: xorl %edx, %edx
2111 ; X86-SSE-NEXT: divl %esi
2112 ; X86-SSE-NEXT: movd %edx, %xmm0
2113 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
2114 ; X86-SSE-NEXT: movd %xmm3, %eax
2115 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
2116 ; X86-SSE-NEXT: movd %xmm3, %esi
2117 ; X86-SSE-NEXT: xorl %edx, %edx
2118 ; X86-SSE-NEXT: divl %esi
2119 ; X86-SSE-NEXT: movd %edx, %xmm7
2120 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
2121 ; X86-SSE-NEXT: movd %xmm5, %eax
2122 ; X86-SSE-NEXT: movd %xmm6, %esi
2123 ; X86-SSE-NEXT: xorl %edx, %edx
2124 ; X86-SSE-NEXT: divl %esi
2125 ; X86-SSE-NEXT: movd %edx, %xmm3
2126 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
2127 ; X86-SSE-NEXT: movd %xmm5, %eax
2128 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
2129 ; X86-SSE-NEXT: movd %xmm5, %esi
2130 ; X86-SSE-NEXT: xorl %edx, %edx
2131 ; X86-SSE-NEXT: divl %esi
2132 ; X86-SSE-NEXT: movd %edx, %xmm5
2133 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
2134 ; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
2135 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
2136 ; X86-SSE-NEXT: movd %xmm6, %eax
2137 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
2138 ; X86-SSE-NEXT: movd %xmm6, %esi
2139 ; X86-SSE-NEXT: xorl %edx, %edx
2140 ; X86-SSE-NEXT: divl %esi
2141 ; X86-SSE-NEXT: movd %edx, %xmm6
2142 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
2143 ; X86-SSE-NEXT: movd %xmm7, %eax
2144 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
2145 ; X86-SSE-NEXT: movd %xmm7, %esi
2146 ; X86-SSE-NEXT: xorl %edx, %edx
2147 ; X86-SSE-NEXT: divl %esi
2148 ; X86-SSE-NEXT: movd %edx, %xmm7
2149 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
2150 ; X86-SSE-NEXT: movd %xmm4, %eax
2151 ; X86-SSE-NEXT: movd %xmm2, %esi
2152 ; X86-SSE-NEXT: xorl %edx, %edx
2153 ; X86-SSE-NEXT: divl %esi
2154 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
2155 ; X86-SSE-NEXT: movd %xmm4, %eax
2156 ; X86-SSE-NEXT: movd %edx, %xmm4
2157 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
2158 ; X86-SSE-NEXT: movd %xmm2, %esi
2159 ; X86-SSE-NEXT: xorl %edx, %edx
2160 ; X86-SSE-NEXT: divl %esi
2161 ; X86-SSE-NEXT: movd %edx, %xmm2
2162 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
2163 ; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
2164 ; X86-SSE-NEXT: movd %xmm1, %eax
2165 ; X86-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
2166 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
2167 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm4
2168 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
2169 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm2
2170 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
2171 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
2172 ; X86-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[0,0]
2173 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm3
2174 ; X86-SSE-NEXT: pmuludq %xmm1, %xmm5
2175 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
2176 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
2177 ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2178 ; X86-SSE-NEXT: xorl %edx, %edx
2179 ; X86-SSE-NEXT: divl 32(%ecx)
2180 ; X86-SSE-NEXT: movdqa %xmm0, (%eax)
2181 ; X86-SSE-NEXT: movdqa %xmm4, (%eax)
2182 ; X86-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
2183 ; X86-SSE-NEXT: movl %eax, (%eax)
2184 ; X86-SSE-NEXT: popl %esi
2185 ; X86-SSE-NEXT: retl
2187 ; X86-AVX1-LABEL: PR34947:
2188 ; X86-AVX1: # %bb.0:
2189 ; X86-AVX1-NEXT: pushl %ebp
2190 ; X86-AVX1-NEXT: pushl %ebx
2191 ; X86-AVX1-NEXT: pushl %edi
2192 ; X86-AVX1-NEXT: pushl %esi
2193 ; X86-AVX1-NEXT: subl $16, %esp
2194 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
2195 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
2196 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
2197 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
2198 ; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
2199 ; X86-AVX1-NEXT: vmovd %xmm1, %eax
2200 ; X86-AVX1-NEXT: xorl %edx, %edx
2201 ; X86-AVX1-NEXT: divl 32(%ecx)
2202 ; X86-AVX1-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
2203 ; X86-AVX1-NEXT: vpextrd $3, %xmm2, %eax
2204 ; X86-AVX1-NEXT: vmovdqa (%ecx), %xmm3
2205 ; X86-AVX1-NEXT: vmovdqa 16(%ecx), %xmm1
2206 ; X86-AVX1-NEXT: vpextrd $3, %xmm3, %ecx
2207 ; X86-AVX1-NEXT: xorl %edx, %edx
2208 ; X86-AVX1-NEXT: divl %ecx
2209 ; X86-AVX1-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
2210 ; X86-AVX1-NEXT: vpextrd $2, %xmm2, %eax
2211 ; X86-AVX1-NEXT: vpextrd $2, %xmm3, %ecx
2212 ; X86-AVX1-NEXT: xorl %edx, %edx
2213 ; X86-AVX1-NEXT: divl %ecx
2214 ; X86-AVX1-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
2215 ; X86-AVX1-NEXT: vpextrd $1, %xmm2, %eax
2216 ; X86-AVX1-NEXT: vpextrd $1, %xmm3, %ecx
2217 ; X86-AVX1-NEXT: xorl %edx, %edx
2218 ; X86-AVX1-NEXT: divl %ecx
2219 ; X86-AVX1-NEXT: movl %edx, (%esp) # 4-byte Spill
2220 ; X86-AVX1-NEXT: vmovd %xmm2, %eax
2221 ; X86-AVX1-NEXT: vmovd %xmm3, %ecx
2222 ; X86-AVX1-NEXT: xorl %edx, %edx
2223 ; X86-AVX1-NEXT: divl %ecx
2224 ; X86-AVX1-NEXT: movl %edx, %ebp
2225 ; X86-AVX1-NEXT: vpextrd $3, %xmm0, %eax
2226 ; X86-AVX1-NEXT: vpextrd $3, %xmm1, %ecx
2227 ; X86-AVX1-NEXT: xorl %edx, %edx
2228 ; X86-AVX1-NEXT: divl %ecx
2229 ; X86-AVX1-NEXT: movl %edx, %ebx
2230 ; X86-AVX1-NEXT: vpextrd $2, %xmm0, %eax
2231 ; X86-AVX1-NEXT: vpextrd $2, %xmm1, %esi
2232 ; X86-AVX1-NEXT: xorl %edx, %edx
2233 ; X86-AVX1-NEXT: divl %esi
2234 ; X86-AVX1-NEXT: movl %edx, %esi
2235 ; X86-AVX1-NEXT: vpextrd $1, %xmm0, %eax
2236 ; X86-AVX1-NEXT: vpextrd $1, %xmm1, %edi
2237 ; X86-AVX1-NEXT: xorl %edx, %edx
2238 ; X86-AVX1-NEXT: divl %edi
2239 ; X86-AVX1-NEXT: movl %edx, %edi
2240 ; X86-AVX1-NEXT: vmovd %xmm0, %eax
2241 ; X86-AVX1-NEXT: vmovd %xmm1, %ecx
2242 ; X86-AVX1-NEXT: xorl %edx, %edx
2243 ; X86-AVX1-NEXT: divl %ecx
2244 ; X86-AVX1-NEXT: vmovd %edx, %xmm0
2245 ; X86-AVX1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
2246 ; X86-AVX1-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0
2247 ; X86-AVX1-NEXT: vpinsrd $3, %ebx, %xmm0, %xmm0
2248 ; X86-AVX1-NEXT: vmovd %ebp, %xmm1
2249 ; X86-AVX1-NEXT: vpinsrd $1, (%esp), %xmm1, %xmm1 # 4-byte Folded Reload
2250 ; X86-AVX1-NEXT: vpinsrd $2, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
2251 ; X86-AVX1-NEXT: vpinsrd $3, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
2252 ; X86-AVX1-NEXT: imull $8199, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
2253 ; X86-AVX1-NEXT: # imm = 0x2007
2254 ; X86-AVX1-NEXT: movl %eax, (%eax)
2255 ; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
2256 ; X86-AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0
2257 ; X86-AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
2258 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
2259 ; X86-AVX1-NEXT: vmovaps %ymm0, (%eax)
2260 ; X86-AVX1-NEXT: addl $16, %esp
2261 ; X86-AVX1-NEXT: popl %esi
2262 ; X86-AVX1-NEXT: popl %edi
2263 ; X86-AVX1-NEXT: popl %ebx
2264 ; X86-AVX1-NEXT: popl %ebp
2265 ; X86-AVX1-NEXT: vzeroupper
2266 ; X86-AVX1-NEXT: retl
2268 ; X86-AVX2-LABEL: PR34947:
2269 ; X86-AVX2: # %bb.0:
2270 ; X86-AVX2-NEXT: pushl %edi
2271 ; X86-AVX2-NEXT: pushl %esi
2272 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %esi
2273 ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
2274 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
2275 ; X86-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
2276 ; X86-AVX2-NEXT: vmovdqa (%esi), %xmm2
2277 ; X86-AVX2-NEXT: vmovdqa 16(%esi), %xmm3
2278 ; X86-AVX2-NEXT: vpextrd $1, %xmm3, %ecx
2279 ; X86-AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
2280 ; X86-AVX2-NEXT: vpextrd $1, %xmm4, %eax
2281 ; X86-AVX2-NEXT: xorl %edx, %edx
2282 ; X86-AVX2-NEXT: divl %ecx
2283 ; X86-AVX2-NEXT: movl %edx, %ecx
2284 ; X86-AVX2-NEXT: vmovd %xmm3, %edi
2285 ; X86-AVX2-NEXT: vmovd %xmm4, %eax
2286 ; X86-AVX2-NEXT: xorl %edx, %edx
2287 ; X86-AVX2-NEXT: divl %edi
2288 ; X86-AVX2-NEXT: vmovd %edx, %xmm5
2289 ; X86-AVX2-NEXT: vpinsrd $1, %ecx, %xmm5, %xmm5
2290 ; X86-AVX2-NEXT: vpextrd $2, %xmm3, %ecx
2291 ; X86-AVX2-NEXT: vpextrd $2, %xmm4, %eax
2292 ; X86-AVX2-NEXT: xorl %edx, %edx
2293 ; X86-AVX2-NEXT: divl %ecx
2294 ; X86-AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
2295 ; X86-AVX2-NEXT: vpextrd $3, %xmm3, %ecx
2296 ; X86-AVX2-NEXT: vpextrd $3, %xmm4, %eax
2297 ; X86-AVX2-NEXT: xorl %edx, %edx
2298 ; X86-AVX2-NEXT: divl %ecx
2299 ; X86-AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3
2300 ; X86-AVX2-NEXT: vpextrd $1, %xmm2, %ecx
2301 ; X86-AVX2-NEXT: vpextrd $1, %xmm1, %eax
2302 ; X86-AVX2-NEXT: xorl %edx, %edx
2303 ; X86-AVX2-NEXT: divl %ecx
2304 ; X86-AVX2-NEXT: movl %edx, %ecx
2305 ; X86-AVX2-NEXT: vmovd %xmm2, %edi
2306 ; X86-AVX2-NEXT: vmovd %xmm1, %eax
2307 ; X86-AVX2-NEXT: xorl %edx, %edx
2308 ; X86-AVX2-NEXT: divl %edi
2309 ; X86-AVX2-NEXT: vmovd %edx, %xmm4
2310 ; X86-AVX2-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
2311 ; X86-AVX2-NEXT: vpextrd $2, %xmm2, %ecx
2312 ; X86-AVX2-NEXT: vpextrd $2, %xmm1, %eax
2313 ; X86-AVX2-NEXT: xorl %edx, %edx
2314 ; X86-AVX2-NEXT: divl %ecx
2315 ; X86-AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
2316 ; X86-AVX2-NEXT: vpextrd $3, %xmm2, %ecx
2317 ; X86-AVX2-NEXT: vpextrd $3, %xmm1, %eax
2318 ; X86-AVX2-NEXT: xorl %edx, %edx
2319 ; X86-AVX2-NEXT: divl %ecx
2320 ; X86-AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1
2321 ; X86-AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
2322 ; X86-AVX2-NEXT: vmovd %xmm0, %eax
2323 ; X86-AVX2-NEXT: xorl %edx, %edx
2324 ; X86-AVX2-NEXT: divl 32(%esi)
2325 ; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
2326 ; X86-AVX2-NEXT: vpmulld %ymm0, %ymm1, %ymm0
2327 ; X86-AVX2-NEXT: imull $8199, %edx, %eax # imm = 0x2007
2328 ; X86-AVX2-NEXT: movl %eax, (%eax)
2329 ; X86-AVX2-NEXT: vmovdqa %ymm0, (%eax)
2330 ; X86-AVX2-NEXT: popl %esi
2331 ; X86-AVX2-NEXT: popl %edi
2332 ; X86-AVX2-NEXT: vzeroupper
2333 ; X86-AVX2-NEXT: retl
2335 ; X64-SSE-LABEL: PR34947:
2336 ; X64-SSE: # %bb.0:
2337 ; X64-SSE-NEXT: movdqa (%rdi), %xmm5
; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-SSE-NEXT: movdqa (%rsi), %xmm2
; X64-SSE-NEXT: movdqa 16(%rsi), %xmm6
; X64-SSE-NEXT: pxor %xmm0, %xmm0
; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT: movdqa %xmm5, %xmm3
; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
; X64-SSE-NEXT: movd %xmm0, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3]
; X64-SSE-NEXT: movd %xmm0, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm8
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
; X64-SSE-NEXT: movd %xmm4, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,0,1]
; X64-SSE-NEXT: movd %xmm4, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm7
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
; X64-SSE-NEXT: movd %xmm5, %eax
; X64-SSE-NEXT: movd %xmm6, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm4
; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
; X64-SSE-NEXT: movd %xmm5, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
; X64-SSE-NEXT: movd %xmm5, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm5
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
; X64-SSE-NEXT: movd %xmm6, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
; X64-SSE-NEXT: movd %xmm6, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm6
; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
; X64-SSE-NEXT: movd %xmm7, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
; X64-SSE-NEXT: movd %xmm7, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm7
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: movd %xmm2, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm0
; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-SSE-NEXT: movd %xmm2, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm2
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; X64-SSE-NEXT: movd %xmm1, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 32(%rsi)
; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
; X64-SSE-NEXT: pmuludq %xmm1, %xmm0
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
; X64-SSE-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-SSE-NEXT: pmuludq %xmm1, %xmm4
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; X64-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[0,0]
; X64-SSE-NEXT: pmuludq %xmm1, %xmm5
; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-SSE-NEXT: movl %eax, (%rax)
; X64-SSE-NEXT: movdqa %xmm2, (%rax)
; X64-SSE-NEXT: movdqa %xmm0, (%rax)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: PR34947:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: pushq %rbp
; X64-AVX1-NEXT: pushq %rbx
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-AVX1-NEXT: vmovd %xmm1, %eax
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl 32(%rsi)
; X64-AVX1-NEXT: movl %edx, %r8d
; X64-AVX1-NEXT: vpextrd $3, %xmm2, %eax
; X64-AVX1-NEXT: vmovdqa (%rsi), %xmm3
; X64-AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
; X64-AVX1-NEXT: vpextrd $3, %xmm3, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %r9d
; X64-AVX1-NEXT: vpextrd $2, %xmm2, %eax
; X64-AVX1-NEXT: vpextrd $2, %xmm3, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %r10d
; X64-AVX1-NEXT: vpextrd $1, %xmm2, %eax
; X64-AVX1-NEXT: vpextrd $1, %xmm3, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %r11d
; X64-AVX1-NEXT: vmovd %xmm2, %eax
; X64-AVX1-NEXT: vmovd %xmm3, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %esi
; X64-AVX1-NEXT: vpextrd $3, %xmm0, %eax
; X64-AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %edi
; X64-AVX1-NEXT: vpextrd $2, %xmm0, %eax
; X64-AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %ecx
; X64-AVX1-NEXT: vpextrd $1, %xmm0, %eax
; X64-AVX1-NEXT: vpextrd $1, %xmm1, %ebx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ebx
; X64-AVX1-NEXT: movl %edx, %ebx
; X64-AVX1-NEXT: vmovd %xmm0, %eax
; X64-AVX1-NEXT: vmovd %xmm1, %ebp
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ebp
; X64-AVX1-NEXT: vmovd %edx, %xmm0
; X64-AVX1-NEXT: vpinsrd $1, %ebx, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
; X64-AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %esi, %xmm2
; X64-AVX1-NEXT: vpinsrd $1, %r11d, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $2, %r10d, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $3, %r9d, %xmm2, %xmm2
; X64-AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-AVX1-NEXT: imull $8199, %r8d, %eax # imm = 0x2007
; X64-AVX1-NEXT: movl %eax, (%rax)
; X64-AVX1-NEXT: vmovaps %ymm0, (%rax)
; X64-AVX1-NEXT: popq %rbx
; X64-AVX1-NEXT: popq %rbp
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: PR34947:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X64-AVX2-NEXT: vmovdqa (%rsi), %xmm2
; X64-AVX2-NEXT: vmovdqa 16(%rsi), %xmm3
; X64-AVX2-NEXT: vpextrd $1, %xmm3, %ecx
; X64-AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
; X64-AVX2-NEXT: vpextrd $1, %xmm4, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: movl %edx, %ecx
; X64-AVX2-NEXT: vmovd %xmm3, %edi
; X64-AVX2-NEXT: vmovd %xmm4, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %edi
; X64-AVX2-NEXT: vmovd %edx, %xmm5
; X64-AVX2-NEXT: vpinsrd $1, %ecx, %xmm5, %xmm5
; X64-AVX2-NEXT: vpextrd $2, %xmm3, %ecx
; X64-AVX2-NEXT: vpextrd $2, %xmm4, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
; X64-AVX2-NEXT: vpextrd $3, %xmm3, %ecx
; X64-AVX2-NEXT: vpextrd $3, %xmm4, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3
; X64-AVX2-NEXT: vpextrd $1, %xmm2, %ecx
; X64-AVX2-NEXT: vpextrd $1, %xmm1, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: movl %edx, %ecx
; X64-AVX2-NEXT: vmovd %xmm2, %edi
; X64-AVX2-NEXT: vmovd %xmm1, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %edi
; X64-AVX2-NEXT: vmovd %edx, %xmm4
; X64-AVX2-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
; X64-AVX2-NEXT: vpextrd $2, %xmm2, %ecx
; X64-AVX2-NEXT: vpextrd $2, %xmm1, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
; X64-AVX2-NEXT: vpextrd $3, %xmm2, %ecx
; X64-AVX2-NEXT: vpextrd $3, %xmm1, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl %ecx
; X64-AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1
; X64-AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; X64-AVX2-NEXT: vmovd %xmm0, %eax
; X64-AVX2-NEXT: xorl %edx, %edx
; X64-AVX2-NEXT: divl 32(%rsi)
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
; X64-AVX2-NEXT: vpmulld %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-AVX2-NEXT: movl %eax, (%rax)
; X64-AVX2-NEXT: vmovdqa %ymm0, (%rax)
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
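; As the CHECK lines above show, the <9 x i32> urem is not a legal vector
; operation, so codegen scalarizes it: eight per-element 'divl's for the low
; <8 x i32> half plus one scalar 'divl' against the ninth divisor at offset 32,
; with the remainders then multiplied by 8199 and stored.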
  %a0 = load <9 x i16>, <9 x i16>* %p0, align 64
  %a1 = load <9 x i32>, <9 x i32>* %p1, align 64
  %ext0 = zext <9 x i16> %a0 to <9 x i32>
  %rem = urem <9 x i32> %ext0, %a1
  %mul = mul <9 x i32> <i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199>, %rem
  store <9 x i32> %mul, <9 x i32>* undef, align 64