; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s

;; A basic functional check to make sure that MMX arithmetic actually compiles.
;; First is a straight translation of the original with bitcasts as needed.
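;; Note (added commentary): the plain vector ops below are widened to 128-bit SSE
;; instructions, while the MMX intrinsics force %mm registers, so the generated
;; code repeatedly crosses domains via movdq2q/movq2dq, as the checks show.
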
define void @test0(ptr %A, ptr %B) {
; X32-LABEL: test0:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddb %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: paddsb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: paddusb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq2dq %mm0, %xmm0
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: psubb %xmm1, %xmm0
; X32-NEXT: movdq2q %xmm0, %mm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: psubsb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: psubusb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq2dq %mm0, %xmm0
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: packuswb %xmm1, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: pand %xmm1, %xmm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: por %xmm0, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: pxor %xmm1, %xmm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: emms
; X32-NEXT: retl
;
; X64-LABEL: test0:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddb %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: packuswb %xmm1, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: por %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, ptr %A
  %tmp3 = load x86_mmx, ptr %B
  %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
  %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
  %tmp4 = add <8 x i8> %tmp1a, %tmp3a
  %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, ptr %A
  %tmp7 = load x86_mmx, ptr %B
  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
  store x86_mmx %tmp12, ptr %A
  %tmp16 = load x86_mmx, ptr %B
  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
  store x86_mmx %tmp21, ptr %A
  %tmp27 = load x86_mmx, ptr %B
  %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
  %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
  %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
  %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, ptr %A
  %tmp31 = load x86_mmx, ptr %B
  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
  store x86_mmx %tmp36, ptr %A
  %tmp40 = load x86_mmx, ptr %B
  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
  store x86_mmx %tmp45, ptr %A
  %tmp51 = load x86_mmx, ptr %B
  %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
  %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
  %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
  %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
  store x86_mmx %tmp52a, ptr %A
  %tmp57 = load x86_mmx, ptr %B
  %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
  %tmp58 = and <8 x i8> %tmp52, %tmp57a
  %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
  store x86_mmx %tmp58a, ptr %A
  %tmp63 = load x86_mmx, ptr %B
  %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
  %tmp64 = or <8 x i8> %tmp58, %tmp63a
  %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
  store x86_mmx %tmp64a, ptr %A
  %tmp69 = load x86_mmx, ptr %B
  %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
  %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
  %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
  %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
  store x86_mmx %tmp70a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

define void @test1(ptr %A, ptr %B) {
; X32-LABEL: test1:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-NEXT: pmuludq %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
; X32-NEXT: pmuludq %xmm1, %xmm2
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pand %xmm0, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: por %xmm1, %xmm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pxor %xmm0, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: emms
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddd %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X64-NEXT: pmuludq %xmm0, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: pmuludq %xmm2, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: por %xmm0, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, ptr %A
  %tmp3 = load x86_mmx, ptr %B
  %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
  %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
  %tmp4 = add <2 x i32> %tmp1a, %tmp3a
  %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, ptr %A
  %tmp9 = load x86_mmx, ptr %B
  %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
  %tmp10 = sub <2 x i32> %tmp4, %tmp9a
  %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
  store x86_mmx %tmp10a, ptr %A
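  ; Note (added commentary): %tmp10a bitcasts %tmp4 rather than %tmp10, so the
  ; sub above is dead; that is why no psub appears in the checks for this function.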
  %tmp15 = load x86_mmx, ptr %B
  %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
  %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
  %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
  %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
  store x86_mmx %tmp16a, ptr %A
  %tmp21 = load x86_mmx, ptr %B
  %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
  %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
  %tmp22 = and <2 x i32> %tmp16b, %tmp21a
  %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
  store x86_mmx %tmp22a, ptr %A
  %tmp27 = load x86_mmx, ptr %B
  %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
  %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
  %tmp28 = or <2 x i32> %tmp22b, %tmp27a
  %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, ptr %A
  %tmp33 = load x86_mmx, ptr %B
  %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
  %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
  %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
  %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
  store x86_mmx %tmp34a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

define void @test2(ptr %A, ptr %B) {
; X32-LABEL: test2:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddw %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: paddsw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: paddusw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq2dq %mm0, %xmm0
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: psubw %xmm1, %xmm0
; X32-NEXT: movdq2q %xmm0, %mm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: psubsw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: psubusw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq2dq %mm0, %xmm0
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: pmulhw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: pmaddwd (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq2dq %mm0, %xmm0
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: andps %xmm0, %xmm1
; X32-NEXT: movlps %xmm1, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: movlps %xmm0, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: xorps %xmm0, %xmm1
; X32-NEXT: movlps %xmm1, (%eax)
; X32-NEXT: emms
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubw %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: pmulhw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: pmaddwd (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq2dq %mm0, %xmm0
; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: andps %xmm0, %xmm1
; X64-NEXT: movlps %xmm1, (%rdi)
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: movlps %xmm0, (%rdi)
; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: xorps %xmm0, %xmm1
; X64-NEXT: movlps %xmm1, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, ptr %A
  %tmp3 = load x86_mmx, ptr %B
  %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
  %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
  %tmp4 = add <4 x i16> %tmp1a, %tmp3a
  %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, ptr %A
  %tmp7 = load x86_mmx, ptr %B
  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
  store x86_mmx %tmp12, ptr %A
  %tmp16 = load x86_mmx, ptr %B
  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
  store x86_mmx %tmp21, ptr %A
  %tmp27 = load x86_mmx, ptr %B
  %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
  %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
  %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
  %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, ptr %A
  %tmp31 = load x86_mmx, ptr %B
  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
  store x86_mmx %tmp36, ptr %A
  %tmp40 = load x86_mmx, ptr %B
  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
  store x86_mmx %tmp45, ptr %A
  %tmp51 = load x86_mmx, ptr %B
  %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
  %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
  %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
  %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
  store x86_mmx %tmp52a, ptr %A
  %tmp55 = load x86_mmx, ptr %B
  %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
  store x86_mmx %tmp60, ptr %A
  %tmp64 = load x86_mmx, ptr %B
  %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
  %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
  store x86_mmx %tmp70, ptr %A
  %tmp75 = load x86_mmx, ptr %B
  %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
  %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
  %tmp76 = and <4 x i16> %tmp70a, %tmp75a
  %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
  store x86_mmx %tmp76a, ptr %A
  %tmp81 = load x86_mmx, ptr %B
  %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
  %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
  %tmp82 = or <4 x i16> %tmp76b, %tmp81a
  %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
  store x86_mmx %tmp82a, ptr %A
  %tmp87 = load x86_mmx, ptr %B
  %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
  %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
  %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
  %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
  store x86_mmx %tmp88a, ptr %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}

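; Note (added commentary): test3 sums <1 x i64> values in a loop; as the checks
; show, this lowers to plain integer adds (addl/adcl on X32, addq on X64) with
; no MMX instructions at all.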
define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X32-LABEL: test3:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: testl %ecx, %ecx
; X32-NEXT: je .LBB3_1
; X32-NEXT: # %bb.2: # %bb26.preheader
; X32-NEXT: xorl %ebx, %ebx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB3_3: # %bb26
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl (%edi,%ebx,8), %ebp
; X32-NEXT: movl %ecx, %esi
; X32-NEXT: movl 4(%edi,%ebx,8), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl (%edi,%ebx,8), %ebp
; X32-NEXT: adcl 4(%edi,%ebx,8), %ecx
; X32-NEXT: addl %ebp, %eax
; X32-NEXT: adcl %ecx, %edx
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: incl %ebx
; X32-NEXT: cmpl %esi, %ebx
; X32-NEXT: jb .LBB3_3
; X32-NEXT: jmp .LBB3_4
; X32-NEXT: .LBB3_1:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB3_4: # %bb31
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testl %edx, %edx
; X64-NEXT: je .LBB3_2
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB3_1: # %bb26
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movslq %ecx, %rcx
; X64-NEXT: movq (%rdi,%rcx,8), %r8
; X64-NEXT: addq (%rsi,%rcx,8), %r8
; X64-NEXT: addq %r8, %rax
; X64-NEXT: incl %ecx
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: jb .LBB3_1
; X64-NEXT: .LBB3_2: # %bb31
; X64-NEXT: retq
entry:
  %tmp2942 = icmp eq i32 %count, 0
  br i1 %tmp2942, label %bb31, label %bb26

bb26: ; preds = %bb26, %entry
  %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
  %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  %tmp13 = getelementptr <1 x i64>, ptr %b, i32 %i.037.0
  %tmp14 = load <1 x i64>, ptr %tmp13
  %tmp18 = getelementptr <1 x i64>, ptr %a, i32 %i.037.0
  %tmp19 = load <1 x i64>, ptr %tmp18
  %tmp21 = add <1 x i64> %tmp19, %tmp14
  %tmp22 = add <1 x i64> %tmp21, %sum.035.0
  %tmp25 = add i32 %i.037.0, 1
  %tmp29 = icmp ult i32 %tmp25, %count
  br i1 %tmp29, label %bb26, label %bb31

bb31: ; preds = %bb26, %entry
  %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  ret <1 x i64> %sum.035.1
}

; There are no MMX operations here, so we use XMM or i64.
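; (Added note: the narrow vector adds below are widened to 128-bit
; paddb/paddw/paddd, and the <1 x i64> case becomes scalar integer math.)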
define void @ti8(double %a, double %b) nounwind {
; X32-LABEL: ti8:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddb %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti8:
; X64: # %bb.0: # %entry
; X64-NEXT: paddb %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <8 x i8>
  %tmp2 = bitcast double %b to <8 x i8>
  %tmp3 = add <8 x i8> %tmp1, %tmp2
  store <8 x i8> %tmp3, ptr null
  ret void
}

define void @ti16(double %a, double %b) nounwind {
; X32-LABEL: ti16:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddw %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti16:
; X64: # %bb.0: # %entry
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <4 x i16>
  %tmp2 = bitcast double %b to <4 x i16>
  %tmp3 = add <4 x i16> %tmp1, %tmp2
  store <4 x i16> %tmp3, ptr null
  ret void
}

define void @ti32(double %a, double %b) nounwind {
; X32-LABEL: ti32:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddd %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti32:
; X64: # %bb.0: # %entry
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <2 x i32>
  %tmp2 = bitcast double %b to <2 x i32>
  %tmp3 = add <2 x i32> %tmp1, %tmp2
  store <2 x i32> %tmp3, ptr null
  ret void
}

define void @ti64(double %a, double %b) nounwind {
; X32-LABEL: ti64:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %eax, 0
; X32-NEXT: movl %ecx, 4
; X32-NEXT: retl
;
; X64-LABEL: ti64:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: movq %xmm1, %rcx
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: movq %rcx, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = add <1 x i64> %tmp1, %tmp2
  store <1 x i64> %tmp3, ptr null
  ret void
}

; MMX intrinsic calls get us MMX instructions.
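; (Added note: on x86-64 the double arguments arrive in %xmm0/%xmm1 and are
; transferred with movdq2q before the MMX op; on X32 they are passed on the
; stack, so the MMX loads come straight from (%esp).)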
define void @ti8a(double %a, double %b) nounwind {
; X32-LABEL: ti8a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddb {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti8a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddb %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, ptr null
  ret void
}

define void @ti16a(double %a, double %b) nounwind {
; X32-LABEL: ti16a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddw {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti16a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddw %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, ptr null
  ret void
}

define void @ti32a(double %a, double %b) nounwind {
; X32-LABEL: ti32a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddd {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti32a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddd %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, ptr null
  ret void
}

define void @ti64a(double %a, double %b) nounwind {
; X32-LABEL: ti64a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti64a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddq %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, ptr null
  ret void
}

; Make sure we clamp large shift amounts to 255
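; (Added note: the IR below requests a shift count of 268435456; psrad fills
; every lane with sign bits for any count of 32 or more, so clamping to the
; 8-bit-immediate maximum of 255 preserves the result.)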
define i64 @pr43922() {
; X32-LABEL: pr43922:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $8, %esp
; X32-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X32-NEXT: psrad $255, %mm0
; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movl (%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; X64-LABEL: pr43922:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X64-NEXT: psrad $255, %mm0
; X64-NEXT: movq %mm0, %rax
; X64-NEXT: retq
entry:
  %0 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx bitcast (<2 x i32> <i32 2058005162, i32 2058005162> to x86_mmx), i32 268435456)
  %1 = bitcast x86_mmx %0 to i64
  ret i64 %1
}

declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)

declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)

declare void @llvm.x86.mmx.emms()

declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)