; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X86 %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s

;; A basic functional check to make sure that MMX arithmetic actually compiles.
;; First is a straight translation of the original with bitcasts as needed.
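;; test0, test1, and test2 mix plain IR vector arithmetic with @llvm.x86.mmx.*
;; intrinsic calls, so codegen has to shuttle values between the XMM and MMX
;; register files (the movdq2q/movq2dq pairs in the checks below).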

define void @test0(ptr %A, ptr %B) nounwind {
; X86-LABEL: test0:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: paddb %xmm0, %xmm1
; X86-NEXT: movdq2q %xmm1, %mm0
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: paddsb (%ecx), %mm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: paddusb (%ecx), %mm0
; X86-NEXT: movq2dq %mm0, %xmm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: psubb %xmm1, %xmm0
; X86-NEXT: movdq2q %xmm0, %mm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: psubsb (%ecx), %mm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: psubusb (%ecx), %mm0
; X86-NEXT: movq2dq %mm0, %xmm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-NEXT: pmullw %xmm0, %xmm1
; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: packuswb %xmm1, %xmm1
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: pand %xmm1, %xmm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: por %xmm0, %xmm1
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: pxor %xmm1, %xmm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: emms
; X86-NEXT: retl
;
; X64-LABEL: test0:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddb %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusb (%rsi), %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusb (%rsi), %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: packuswb %xmm1, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: por %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load <1 x i64>, ptr %A
  %tmp3 = load <1 x i64>, ptr %B
  %tmp1a = bitcast <1 x i64> %tmp1 to <8 x i8>
  %tmp3a = bitcast <1 x i64> %tmp3 to <8 x i8>
  %tmp4 = add <8 x i8> %tmp1a, %tmp3a
  %tmp4a = bitcast <8 x i8> %tmp4 to <1 x i64>
  store <1 x i64> %tmp4a, ptr %A
  %tmp7 = load <1 x i64>, ptr %B
  %tmp12 = tail call <1 x i64> @llvm.x86.mmx.padds.b(<1 x i64> %tmp4a, <1 x i64> %tmp7)
  store <1 x i64> %tmp12, ptr %A
  %tmp16 = load <1 x i64>, ptr %B
  %tmp21 = tail call <1 x i64> @llvm.x86.mmx.paddus.b(<1 x i64> %tmp12, <1 x i64> %tmp16)
  store <1 x i64> %tmp21, ptr %A
  %tmp27 = load <1 x i64>, ptr %B
  %tmp21a = bitcast <1 x i64> %tmp21 to <8 x i8>
  %tmp27a = bitcast <1 x i64> %tmp27 to <8 x i8>
  %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
  %tmp28a = bitcast <8 x i8> %tmp28 to <1 x i64>
  store <1 x i64> %tmp28a, ptr %A
  %tmp31 = load <1 x i64>, ptr %B
  %tmp36 = tail call <1 x i64> @llvm.x86.mmx.psubs.b(<1 x i64> %tmp28a, <1 x i64> %tmp31)
  store <1 x i64> %tmp36, ptr %A
  %tmp40 = load <1 x i64>, ptr %B
  %tmp45 = tail call <1 x i64> @llvm.x86.mmx.psubus.b(<1 x i64> %tmp36, <1 x i64> %tmp40)
  store <1 x i64> %tmp45, ptr %A
  %tmp51 = load <1 x i64>, ptr %B
  %tmp45a = bitcast <1 x i64> %tmp45 to <8 x i8>
  %tmp51a = bitcast <1 x i64> %tmp51 to <8 x i8>
  %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
  %tmp52a = bitcast <8 x i8> %tmp52 to <1 x i64>
  store <1 x i64> %tmp52a, ptr %A
  %tmp57 = load <1 x i64>, ptr %B
  %tmp57a = bitcast <1 x i64> %tmp57 to <8 x i8>
  %tmp58 = and <8 x i8> %tmp52, %tmp57a
  %tmp58a = bitcast <8 x i8> %tmp58 to <1 x i64>
  store <1 x i64> %tmp58a, ptr %A
  %tmp63 = load <1 x i64>, ptr %B
  %tmp63a = bitcast <1 x i64> %tmp63 to <8 x i8>
  %tmp64 = or <8 x i8> %tmp58, %tmp63a
  %tmp64a = bitcast <8 x i8> %tmp64 to <1 x i64>
  store <1 x i64> %tmp64a, ptr %A
  %tmp69 = load <1 x i64>, ptr %B
  %tmp69a = bitcast <1 x i64> %tmp69 to <8 x i8>
  %tmp64b = bitcast <1 x i64> %tmp64a to <8 x i8>
  %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
  %tmp70a = bitcast <8 x i8> %tmp70 to <1 x i64>
  store <1 x i64> %tmp70a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

define void @test1(ptr %A, ptr %B) nounwind {
; X86-LABEL: test1:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
; X86-NEXT: pmuludq %xmm1, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: pxor %xmm0, %xmm1
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: emms
; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddd %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X64-NEXT: pmuludq %xmm0, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: pmuludq %xmm2, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: por %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load <1 x i64>, ptr %A
  %tmp3 = load <1 x i64>, ptr %B
  %tmp1a = bitcast <1 x i64> %tmp1 to <2 x i32>
  %tmp3a = bitcast <1 x i64> %tmp3 to <2 x i32>
  %tmp4 = add <2 x i32> %tmp1a, %tmp3a
  %tmp4a = bitcast <2 x i32> %tmp4 to <1 x i64>
  store <1 x i64> %tmp4a, ptr %A
  %tmp9 = load <1 x i64>, ptr %B
  %tmp9a = bitcast <1 x i64> %tmp9 to <2 x i32>
  %tmp10 = sub <2 x i32> %tmp4, %tmp9a
  %tmp10a = bitcast <2 x i32> %tmp4 to <1 x i64>
  store <1 x i64> %tmp10a, ptr %A
  %tmp15 = load <1 x i64>, ptr %B
  %tmp10b = bitcast <1 x i64> %tmp10a to <2 x i32>
  %tmp15a = bitcast <1 x i64> %tmp15 to <2 x i32>
  %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
  %tmp16a = bitcast <2 x i32> %tmp16 to <1 x i64>
  store <1 x i64> %tmp16a, ptr %A
  %tmp21 = load <1 x i64>, ptr %B
  %tmp16b = bitcast <1 x i64> %tmp16a to <2 x i32>
  %tmp21a = bitcast <1 x i64> %tmp21 to <2 x i32>
  %tmp22 = and <2 x i32> %tmp16b, %tmp21a
  %tmp22a = bitcast <2 x i32> %tmp22 to <1 x i64>
  store <1 x i64> %tmp22a, ptr %A
  %tmp27 = load <1 x i64>, ptr %B
  %tmp22b = bitcast <1 x i64> %tmp22a to <2 x i32>
  %tmp27a = bitcast <1 x i64> %tmp27 to <2 x i32>
  %tmp28 = or <2 x i32> %tmp22b, %tmp27a
  %tmp28a = bitcast <2 x i32> %tmp28 to <1 x i64>
  store <1 x i64> %tmp28a, ptr %A
  %tmp33 = load <1 x i64>, ptr %B
  %tmp28b = bitcast <1 x i64> %tmp28a to <2 x i32>
  %tmp33a = bitcast <1 x i64> %tmp33 to <2 x i32>
  %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
  %tmp34a = bitcast <2 x i32> %tmp34 to <1 x i64>
  store <1 x i64> %tmp34a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

define void @test2(ptr %A, ptr %B) nounwind {
; X86-LABEL: test2:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: paddw %xmm0, %xmm1
; X86-NEXT: movdq2q %xmm1, %mm0
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: paddsw (%ecx), %mm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: paddusw (%ecx), %mm0
; X86-NEXT: movq2dq %mm0, %xmm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: psubw %xmm1, %xmm0
; X86-NEXT: movdq2q %xmm0, %mm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: psubsw (%ecx), %mm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: psubusw (%ecx), %mm0
; X86-NEXT: movq2dq %mm0, %xmm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: pmullw %xmm0, %xmm1
; X86-NEXT: movdq2q %xmm1, %mm0
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: pmulhw (%ecx), %mm0
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: pmaddwd (%ecx), %mm0
; X86-NEXT: movq %mm0, (%esp)
; X86-NEXT: movl (%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movq %mm0, (%eax)
; X86-NEXT: andl 4(%ecx), %esi
; X86-NEXT: movd %esi, %xmm0
; X86-NEXT: andl (%ecx), %edx
; X86-NEXT: movd %edx, %xmm1
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: pxor %xmm0, %xmm1
; X86-NEXT: movq %xmm1, (%eax)
; X86-NEXT: emms
; X86-NEXT: leal -4(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusw (%rsi), %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubw %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusw (%rsi), %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: pmulhw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: pmaddwd (%rsi), %mm0
; X64-NEXT: movq %mm0, %rax
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: andq (%rsi), %rax
; X64-NEXT: movq %rax, %xmm0
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: por %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load <1 x i64>, ptr %A
  %tmp3 = load <1 x i64>, ptr %B
  %tmp1a = bitcast <1 x i64> %tmp1 to <4 x i16>
  %tmp3a = bitcast <1 x i64> %tmp3 to <4 x i16>
  %tmp4 = add <4 x i16> %tmp1a, %tmp3a
  %tmp4a = bitcast <4 x i16> %tmp4 to <1 x i64>
  store <1 x i64> %tmp4a, ptr %A
  %tmp7 = load <1 x i64>, ptr %B
  %tmp12 = tail call <1 x i64> @llvm.x86.mmx.padds.w(<1 x i64> %tmp4a, <1 x i64> %tmp7)
  store <1 x i64> %tmp12, ptr %A
  %tmp16 = load <1 x i64>, ptr %B
  %tmp21 = tail call <1 x i64> @llvm.x86.mmx.paddus.w(<1 x i64> %tmp12, <1 x i64> %tmp16)
  store <1 x i64> %tmp21, ptr %A
  %tmp27 = load <1 x i64>, ptr %B
  %tmp21a = bitcast <1 x i64> %tmp21 to <4 x i16>
  %tmp27a = bitcast <1 x i64> %tmp27 to <4 x i16>
  %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
  %tmp28a = bitcast <4 x i16> %tmp28 to <1 x i64>
  store <1 x i64> %tmp28a, ptr %A
  %tmp31 = load <1 x i64>, ptr %B
  %tmp36 = tail call <1 x i64> @llvm.x86.mmx.psubs.w(<1 x i64> %tmp28a, <1 x i64> %tmp31)
  store <1 x i64> %tmp36, ptr %A
  %tmp40 = load <1 x i64>, ptr %B
  %tmp45 = tail call <1 x i64> @llvm.x86.mmx.psubus.w(<1 x i64> %tmp36, <1 x i64> %tmp40)
  store <1 x i64> %tmp45, ptr %A
  %tmp51 = load <1 x i64>, ptr %B
  %tmp45a = bitcast <1 x i64> %tmp45 to <4 x i16>
  %tmp51a = bitcast <1 x i64> %tmp51 to <4 x i16>
  %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
  %tmp52a = bitcast <4 x i16> %tmp52 to <1 x i64>
  store <1 x i64> %tmp52a, ptr %A
  %tmp55 = load <1 x i64>, ptr %B
  %tmp60 = tail call <1 x i64> @llvm.x86.mmx.pmulh.w(<1 x i64> %tmp52a, <1 x i64> %tmp55)
  store <1 x i64> %tmp60, ptr %A
  %tmp64 = load <1 x i64>, ptr %B
  %tmp69 = tail call <1 x i64> @llvm.x86.mmx.pmadd.wd(<1 x i64> %tmp60, <1 x i64> %tmp64)
  store <1 x i64> %tmp69, ptr %A
  %tmp75 = load <1 x i64>, ptr %B
  %tmp70a = bitcast <1 x i64> %tmp69 to <4 x i16>
  %tmp75a = bitcast <1 x i64> %tmp75 to <4 x i16>
  %tmp76 = and <4 x i16> %tmp70a, %tmp75a
  %tmp76a = bitcast <4 x i16> %tmp76 to <1 x i64>
  store <1 x i64> %tmp76a, ptr %A
  %tmp81 = load <1 x i64>, ptr %B
  %tmp76b = bitcast <1 x i64> %tmp76a to <4 x i16>
  %tmp81a = bitcast <1 x i64> %tmp81 to <4 x i16>
  %tmp82 = or <4 x i16> %tmp76b, %tmp81a
  %tmp82a = bitcast <4 x i16> %tmp82 to <1 x i64>
  store <1 x i64> %tmp82a, ptr %A
  %tmp87 = load <1 x i64>, ptr %B
  %tmp82b = bitcast <1 x i64> %tmp82a to <4 x i16>
  %tmp87a = bitcast <1 x i64> %tmp87 to <4 x i16>
  %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
  %tmp88a = bitcast <4 x i16> %tmp88 to <1 x i64>
  store <1 x i64> %tmp88a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X86-LABEL: test3:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: je .LBB3_1
; X86-NEXT: # %bb.2: # %bb26.preheader
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB3_3: # %bb26
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl (%edi,%ebx,8), %ebp
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: movl 4(%edi,%ebx,8), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: addl (%edi,%ebx,8), %ebp
; X86-NEXT: adcl 4(%edi,%ebx,8), %ecx
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: adcl %ecx, %edx
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: incl %ebx
; X86-NEXT: cmpl %esi, %ebx
; X86-NEXT: jb .LBB3_3
; X86-NEXT: jmp .LBB3_4
; X86-NEXT: .LBB3_1:
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .LBB3_4: # %bb31
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testl %edx, %edx
; X64-NEXT: je .LBB3_2
; X64-NEXT: .p2align 4
; X64-NEXT: .LBB3_1: # %bb26
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movslq %ecx, %rcx
; X64-NEXT: movq (%rdi,%rcx,8), %r8
; X64-NEXT: addq (%rsi,%rcx,8), %r8
; X64-NEXT: addq %r8, %rax
; X64-NEXT: incl %ecx
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: jb .LBB3_1
; X64-NEXT: .LBB3_2: # %bb31
; X64-NEXT: retq
entry:
  %tmp2942 = icmp eq i32 %count, 0
  br i1 %tmp2942, label %bb31, label %bb26

bb26:
  %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
  %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  %tmp13 = getelementptr <1 x i64>, ptr %b, i32 %i.037.0
  %tmp14 = load <1 x i64>, ptr %tmp13
  %tmp18 = getelementptr <1 x i64>, ptr %a, i32 %i.037.0
  %tmp19 = load <1 x i64>, ptr %tmp18
  %tmp21 = add <1 x i64> %tmp19, %tmp14
  %tmp22 = add <1 x i64> %tmp21, %sum.035.0
  %tmp25 = add i32 %i.037.0, 1
  %tmp29 = icmp ult i32 %tmp25, %count
  br i1 %tmp29, label %bb26, label %bb31

bb31:
  %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  ret <1 x i64> %sum.035.1
}

; There are no MMX operations here, so we use XMM or i64.
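; With no intrinsics involved, the <8 x i8>/<4 x i16>/<2 x i32> adds below are
; widened to 128-bit SSE2 instructions on XMM registers (or, for <1 x i64> on
; i686, plain addl/adcl on GPRs), and only the low 8 bytes are stored.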

define void @ti8(double %a, double %b) nounwind {
; X86-LABEL: ti8:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: paddb %xmm0, %xmm1
; X86-NEXT: movq %xmm1, 0
; X86-NEXT: retl
;
; X64-LABEL: ti8:
; X64: # %bb.0: # %entry
; X64-NEXT: paddb %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <8 x i8>
  %tmp2 = bitcast double %b to <8 x i8>
  %tmp3 = add <8 x i8> %tmp1, %tmp2
  store <8 x i8> %tmp3, ptr null
  ret void
}

define void @ti16(double %a, double %b) nounwind {
; X86-LABEL: ti16:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: paddw %xmm0, %xmm1
; X86-NEXT: movq %xmm1, 0
; X86-NEXT: retl
;
; X64-LABEL: ti16:
; X64: # %bb.0: # %entry
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <4 x i16>
  %tmp2 = bitcast double %b to <4 x i16>
  %tmp3 = add <4 x i16> %tmp1, %tmp2
  store <4 x i16> %tmp3, ptr null
  ret void
}

define void @ti32(double %a, double %b) nounwind {
; X86-LABEL: ti32:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: paddd %xmm0, %xmm1
; X86-NEXT: movq %xmm1, 0
; X86-NEXT: retl
;
; X64-LABEL: ti32:
; X64: # %bb.0: # %entry
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <2 x i32>
  %tmp2 = bitcast double %b to <2 x i32>
  %tmp3 = add <2 x i32> %tmp1, %tmp2
  store <2 x i32> %tmp3, ptr null
  ret void
}

define void @ti64(double %a, double %b) nounwind {
; X86-LABEL: ti64:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %eax, 0
; X86-NEXT: movl %ecx, 4
; X86-NEXT: retl
;
; X64-LABEL: ti64:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: movq %xmm1, %rcx
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: movq %rcx, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = add <1 x i64> %tmp1, %tmp2
  store <1 x i64> %tmp3, ptr null
  ret void
}

; MMX intrinsic calls get us MMX instructions.
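; The @llvm.x86.mmx.* intrinsics take <1 x i64> operands that must live in the
; MMX register file, so the adds below come out as paddb/paddw/paddd/paddq on
; %mm registers, with movdq2q moves on X64 where the doubles arrive in XMM.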

define void @ti8a(double %a, double %b) nounwind {
; X86-LABEL: ti8a:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X86-NEXT: paddb {{[0-9]+}}(%esp), %mm0
; X86-NEXT: movq %mm0, 0
; X86-NEXT: retl
;
; X64-LABEL: ti8a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddb %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = tail call <1 x i64> @llvm.x86.mmx.padd.b(<1 x i64> %tmp1, <1 x i64> %tmp2)
  store <1 x i64> %tmp3, ptr null
  ret void
}

define void @ti16a(double %a, double %b) nounwind {
; X86-LABEL: ti16a:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X86-NEXT: paddw {{[0-9]+}}(%esp), %mm0
; X86-NEXT: movq %mm0, 0
; X86-NEXT: retl
;
; X64-LABEL: ti16a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddw %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %tmp1, <1 x i64> %tmp2)
  store <1 x i64> %tmp3, ptr null
  ret void
}

define void @ti32a(double %a, double %b) nounwind {
; X86-LABEL: ti32a:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X86-NEXT: paddd {{[0-9]+}}(%esp), %mm0
; X86-NEXT: movq %mm0, 0
; X86-NEXT: retl
;
; X64-LABEL: ti32a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddd %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = tail call <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64> %tmp1, <1 x i64> %tmp2)
  store <1 x i64> %tmp3, ptr null
  ret void
}

define void @ti64a(double %a, double %b) nounwind {
; X86-LABEL: ti64a:
; X86: # %bb.0: # %entry
; X86-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X86-NEXT: paddq {{[0-9]+}}(%esp), %mm0
; X86-NEXT: movq %mm0, 0
; X86-NEXT: retl
;
; X64-LABEL: ti64a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddq %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = tail call <1 x i64> @llvm.x86.mmx.padd.q(<1 x i64> %tmp1, <1 x i64> %tmp2)
  store <1 x i64> %tmp3, ptr null
  ret void
}

; Make sure we clamp large shift amounts to 255.
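; psrai.d is called with a shift count of 268435456 (0x10000000); the backend
; clamps it to 255 (the psrad $255 below), and since an arithmetic shift by 32
; or more fills each i32 lane with its sign bit, the positive 0x7AAAAAAA lanes
; come out as 0.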

define i64 @pr43922() nounwind {
; X86-LABEL: pr43922:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X86-NEXT: psrad $255, %mm0
; X86-NEXT: movq %mm0, (%esp)
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: pr43922:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X64-NEXT: psrad $255, %mm0
; X64-NEXT: movq %mm0, %rax
; X64-NEXT: retq
entry:
  %0 = tail call <1 x i64> @llvm.x86.mmx.psrai.d(<1 x i64> bitcast (<2 x i32> <i32 2058005162, i32 2058005162> to <1 x i64>), i32 268435456)
  %1 = bitcast <1 x i64> %0 to i64
  ret i64 %1
}

declare <1 x i64> @llvm.x86.mmx.psrai.d(<1 x i64>, i32)

declare <1 x i64> @llvm.x86.mmx.padd.b(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.padd.q(<1 x i64>, <1 x i64>)

declare <1 x i64> @llvm.x86.mmx.paddus.b(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.psubus.b(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.paddus.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.psubus.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.pmulh.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.pmadd.wd(<1 x i64>, <1 x i64>)

declare void @llvm.x86.mmx.emms()

declare <1 x i64> @llvm.x86.mmx.padds.b(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.padds.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.psubs.b(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.psubs.w(<1 x i64>, <1 x i64>)