; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s

;; A basic sanity check to make sure that MMX arithmetic actually compiles.
;; First is a straight translation of the original with bitcasts as needed.
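;; Each result is stored back through %A between operations.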

define void @test0(x86_mmx* %A, x86_mmx* %B) {
; X32-LABEL: test0:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $16, %esp
; X32-NEXT: movl 12(%ebp), %ecx
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddb %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: paddsb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: paddusb (%ecx), %mm0
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: psubb %xmm1, %xmm0
; X32-NEXT: movdq2q %xmm0, %mm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: psubsb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: psubusb (%ecx), %mm0
; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; X32-NEXT: movdqa %xmm1, %xmm2
; X32-NEXT: pand %xmm0, %xmm2
; X32-NEXT: packuswb %xmm2, %xmm2
; X32-NEXT: movq %xmm2, (%eax)
; X32-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-NEXT: pand %xmm1, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm1
; X32-NEXT: pand %xmm0, %xmm1
; X32-NEXT: packuswb %xmm1, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-NEXT: por %xmm2, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm2
; X32-NEXT: pand %xmm0, %xmm2
; X32-NEXT: packuswb %xmm2, %xmm2
; X32-NEXT: movq %xmm2, (%eax)
; X32-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; X32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-NEXT: pxor %xmm1, %xmm2
; X32-NEXT: pand %xmm0, %xmm2
; X32-NEXT: packuswb %xmm2, %xmm2
; X32-NEXT: movq %xmm2, (%eax)
; X32-NEXT: emms
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; X64-LABEL: test0:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddb %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusb (%rsi), %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsb (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusb (%rsi), %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: packuswb %xmm2, %xmm2
; X64-NEXT: movq %xmm2, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-NEXT: pand %xmm1, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm1
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: packuswb %xmm1, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: packuswb %xmm2, %xmm2
; X64-NEXT: movq %xmm2, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; X64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-NEXT: pxor %xmm1, %xmm2
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: packuswb %xmm2, %xmm2
; X64-NEXT: movq %xmm2, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, x86_mmx* %A
  %tmp3 = load x86_mmx, x86_mmx* %B
  %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
  %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
  %tmp4 = add <8 x i8> %tmp1a, %tmp3a
  %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, x86_mmx* %A
  %tmp7 = load x86_mmx, x86_mmx* %B
  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
  store x86_mmx %tmp12, x86_mmx* %A
  %tmp16 = load x86_mmx, x86_mmx* %B
  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
  store x86_mmx %tmp21, x86_mmx* %A
  %tmp27 = load x86_mmx, x86_mmx* %B
  %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
  %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
  %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
  %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, x86_mmx* %A
  %tmp31 = load x86_mmx, x86_mmx* %B
  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
  store x86_mmx %tmp36, x86_mmx* %A
  %tmp40 = load x86_mmx, x86_mmx* %B
  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
  store x86_mmx %tmp45, x86_mmx* %A
  %tmp51 = load x86_mmx, x86_mmx* %B
  %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
  %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
  %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
  %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
  store x86_mmx %tmp52a, x86_mmx* %A
  %tmp57 = load x86_mmx, x86_mmx* %B
  %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
  %tmp58 = and <8 x i8> %tmp52, %tmp57a
  %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
  store x86_mmx %tmp58a, x86_mmx* %A
  %tmp63 = load x86_mmx, x86_mmx* %B
  %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
  %tmp64 = or <8 x i8> %tmp58, %tmp63a
  %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
  store x86_mmx %tmp64a, x86_mmx* %A
  %tmp69 = load x86_mmx, x86_mmx* %B
  %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
  %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
  %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
  %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
  store x86_mmx %tmp70a, x86_mmx* %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}
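
;; The same pattern, this time with <2 x i32> element types.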
define void @test1(x86_mmx* %A, x86_mmx* %B) {
; X32-LABEL: test1:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X32-NEXT: paddq %xmm0, %xmm1
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X32-NEXT: pmuludq %xmm1, %xmm0
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X32-NEXT: andps %xmm0, %xmm1
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X32-NEXT: xorps %xmm0, %xmm1
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: emms
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X64-NEXT: paddq %xmm0, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X64-NEXT: pmuludq %xmm1, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; X64-NEXT: pxor %xmm0, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, x86_mmx* %A
  %tmp3 = load x86_mmx, x86_mmx* %B
  %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
  %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
  %tmp4 = add <2 x i32> %tmp1a, %tmp3a
  %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, x86_mmx* %A
  %tmp9 = load x86_mmx, x86_mmx* %B
  %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
  %tmp10 = sub <2 x i32> %tmp4, %tmp9a
  %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
  store x86_mmx %tmp10a, x86_mmx* %A
  %tmp15 = load x86_mmx, x86_mmx* %B
  %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
  %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
  %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
  %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
  store x86_mmx %tmp16a, x86_mmx* %A
  %tmp21 = load x86_mmx, x86_mmx* %B
  %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
  %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
  %tmp22 = and <2 x i32> %tmp16b, %tmp21a
  %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
  store x86_mmx %tmp22a, x86_mmx* %A
  %tmp27 = load x86_mmx, x86_mmx* %B
  %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
  %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
  %tmp28 = or <2 x i32> %tmp22b, %tmp27a
  %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, x86_mmx* %A
  %tmp33 = load x86_mmx, x86_mmx* %B
  %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
  %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
  %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
  %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
  store x86_mmx %tmp34a, x86_mmx* %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}
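
;; And again with <4 x i16>, including the saturating, high-multiply, and
;; multiply-add intrinsics.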
define void @test2(x86_mmx* %A, x86_mmx* %B) {
; X32-LABEL: test2:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: movl 12(%ebp), %ecx
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddw %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: paddsw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: paddusw (%ecx), %mm0
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: psubw %xmm1, %xmm0
; X32-NEXT: movdq2q %xmm0, %mm0
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: psubsw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: psubusw (%ecx), %mm0
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: movdq2q %xmm1, %mm0
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: pmulhw (%ecx), %mm0
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: pmaddwd (%ecx), %mm0
; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X32-NEXT: movq %mm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-NEXT: pand %xmm0, %xmm1
; X32-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X32-NEXT: por %xmm1, %xmm0
; X32-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; X32-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-NEXT: pxor %xmm0, %xmm1
; X32-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-NEXT: movq %xmm0, (%eax)
; X32-NEXT: emms
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: paddsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: paddusw (%rsi), %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: psubw %xmm1, %xmm0
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: psubsw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: psubusw (%rsi), %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: movdq2q %xmm1, %mm0
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: pmulhw (%rsi), %mm0
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: pmaddwd (%rsi), %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X64-NEXT: movq %mm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-NEXT: pxor %xmm0, %xmm1
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: emms
; X64-NEXT: retq
entry:
  %tmp1 = load x86_mmx, x86_mmx* %A
  %tmp3 = load x86_mmx, x86_mmx* %B
  %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
  %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
  %tmp4 = add <4 x i16> %tmp1a, %tmp3a
  %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
  store x86_mmx %tmp4a, x86_mmx* %A
  %tmp7 = load x86_mmx, x86_mmx* %B
  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
  store x86_mmx %tmp12, x86_mmx* %A
  %tmp16 = load x86_mmx, x86_mmx* %B
  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
  store x86_mmx %tmp21, x86_mmx* %A
  %tmp27 = load x86_mmx, x86_mmx* %B
  %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
  %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
  %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
  %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
  store x86_mmx %tmp28a, x86_mmx* %A
  %tmp31 = load x86_mmx, x86_mmx* %B
  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
  store x86_mmx %tmp36, x86_mmx* %A
  %tmp40 = load x86_mmx, x86_mmx* %B
  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
  store x86_mmx %tmp45, x86_mmx* %A
  %tmp51 = load x86_mmx, x86_mmx* %B
  %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
  %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
  %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
  %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
  store x86_mmx %tmp52a, x86_mmx* %A
  %tmp55 = load x86_mmx, x86_mmx* %B
  %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
  store x86_mmx %tmp60, x86_mmx* %A
  %tmp64 = load x86_mmx, x86_mmx* %B
  %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
  %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
  store x86_mmx %tmp70, x86_mmx* %A
  %tmp75 = load x86_mmx, x86_mmx* %B
  %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
  %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
  %tmp76 = and <4 x i16> %tmp70a, %tmp75a
  %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
  store x86_mmx %tmp76a, x86_mmx* %A
  %tmp81 = load x86_mmx, x86_mmx* %B
  %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
  %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
  %tmp82 = or <4 x i16> %tmp76b, %tmp81a
  %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
  store x86_mmx %tmp82a, x86_mmx* %A
  %tmp87 = load x86_mmx, x86_mmx* %B
  %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
  %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
  %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
  %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
  store x86_mmx %tmp88a, x86_mmx* %A
  tail call void @llvm.x86.mmx.emms()
  ret void
}
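
;; A simple loop that accumulates <1 x i64> values.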
define <1 x i64> @test3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
; X32-LABEL: test3:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $16, %esp
; X32-NEXT: cmpl $0, 16(%ebp)
; X32-NEXT: je .LBB3_1
; X32-NEXT: # %bb.2: # %bb26.preheader
; X32-NEXT: xorl %ebx, %ebx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB3_3: # %bb26
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl 8(%ebp), %ecx
; X32-NEXT: movl %ecx, %esi
; X32-NEXT: movl (%ecx,%ebx,8), %ecx
; X32-NEXT: movl 4(%esi,%ebx,8), %esi
; X32-NEXT: movl 12(%ebp), %edi
; X32-NEXT: addl (%edi,%ebx,8), %ecx
; X32-NEXT: adcl 4(%edi,%ebx,8), %esi
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: movl %ecx, (%esp)
; X32-NEXT: adcl %edx, %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movd %xmm0, %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X32-NEXT: movd %xmm0, %edx
; X32-NEXT: incl %ebx
; X32-NEXT: cmpl 16(%ebp), %ebx
; X32-NEXT: jb .LBB3_3
; X32-NEXT: jmp .LBB3_4
; X32-NEXT: .LBB3_1:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB3_4: # %bb31
; X32-NEXT: leal -12(%ebp), %esp
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testl %edx, %edx
; X64-NEXT: je .LBB3_2
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB3_1: # %bb26
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movslq %r8d, %r8
; X64-NEXT: movq (%rdi,%r8,8), %rcx
; X64-NEXT: addq (%rsi,%r8,8), %rcx
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: incl %r8d
; X64-NEXT: cmpl %edx, %r8d
; X64-NEXT: jb .LBB3_1
; X64-NEXT: .LBB3_2: # %bb31
; X64-NEXT: retq
entry:
  %tmp2942 = icmp eq i32 %count, 0
  br i1 %tmp2942, label %bb31, label %bb26

bb26:
  %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
  %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  %tmp13 = getelementptr <1 x i64>, <1 x i64>* %b, i32 %i.037.0
  %tmp14 = load <1 x i64>, <1 x i64>* %tmp13
  %tmp18 = getelementptr <1 x i64>, <1 x i64>* %a, i32 %i.037.0
  %tmp19 = load <1 x i64>, <1 x i64>* %tmp18
  %tmp21 = add <1 x i64> %tmp19, %tmp14
  %tmp22 = add <1 x i64> %tmp21, %sum.035.0
  %tmp25 = add i32 %i.037.0, 1
  %tmp29 = icmp ult i32 %tmp25, %count
  br i1 %tmp29, label %bb26, label %bb31

bb31:
  %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
  ret <1 x i64> %sum.035.1
}

; There are no MMX operations here, so we use XMM or i64.
define void @ti8(double %a, double %b) nounwind {
; X32-LABEL: ti8:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddb %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti8:
; X64: # %bb.0: # %entry
; X64-NEXT: paddb %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <8 x i8>
  %tmp2 = bitcast double %b to <8 x i8>
  %tmp3 = add <8 x i8> %tmp1, %tmp2
  store <8 x i8> %tmp3, <8 x i8>* null
  ret void
}

define void @ti16(double %a, double %b) nounwind {
; X32-LABEL: ti16:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddw %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti16:
; X64: # %bb.0: # %entry
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <4 x i16>
  %tmp2 = bitcast double %b to <4 x i16>
  %tmp3 = add <4 x i16> %tmp1, %tmp2
  store <4 x i16> %tmp3, <4 x i16>* null
  ret void
}

define void @ti32(double %a, double %b) nounwind {
; X32-LABEL: ti32:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: paddd %xmm0, %xmm1
; X32-NEXT: movq %xmm1, 0
; X32-NEXT: retl
;
; X64-LABEL: ti32:
; X64: # %bb.0: # %entry
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: movq %xmm0, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <2 x i32>
  %tmp2 = bitcast double %b to <2 x i32>
  %tmp3 = add <2 x i32> %tmp1, %tmp2
  store <2 x i32> %tmp3, <2 x i32>* null
  ret void
}

define void @ti64(double %a, double %b) nounwind {
; X32-LABEL: ti64:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %eax, 0
; X32-NEXT: movl %ecx, 4
; X32-NEXT: retl
;
; X64-LABEL: ti64:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: movq %xmm1, %rcx
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: movq %rcx, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = add <1 x i64> %tmp1, %tmp2
  store <1 x i64> %tmp3, <1 x i64>* null
  ret void
}

; MMX intrinsic calls get us MMX instructions.
define void @ti8a(double %a, double %b) nounwind {
; X32-LABEL: ti8a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddb {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti8a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddb %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti16a(double %a, double %b) nounwind {
; X32-LABEL: ti16a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddw {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti16a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddw %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti32a(double %a, double %b) nounwind {
; X32-LABEL: ti32a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddd {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti32a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddd %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti64a(double %a, double %b) nounwind {
; X32-LABEL: ti64a:
; X32: # %bb.0: # %entry
; X32-NEXT: movq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: paddq {{[0-9]+}}(%esp), %mm0
; X32-NEXT: movq %mm0, 0
; X32-NEXT: retl
;
; X64-LABEL: ti64a:
; X64: # %bb.0: # %entry
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movdq2q %xmm1, %mm1
; X64-NEXT: paddq %mm0, %mm1
; X64-NEXT: movq %mm1, 0
; X64-NEXT: retq
entry:
  %tmp1 = bitcast double %a to x86_mmx
  %tmp2 = bitcast double %b to x86_mmx
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}
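
;; Declarations for the MMX intrinsics used above.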
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)

declare void @llvm.x86.mmx.emms()

declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)