; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
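
; t0 through t7 feed a variable shift amount, loaded from memory, to the MMX
; shift-by-immediate intrinsics; the checks verify that the amount is moved
; into an MMX register (movd) and the register form of the shift is used.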
define i64 @t0(ptr %a, ptr %b) nounwind {
; X86-LABEL: t0:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psllq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t0:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psllq %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)

define i64 @t1(ptr %a, ptr %b) nounwind {
; X86-LABEL: t1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psrlq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psrlq %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)

define i64 @t2(ptr %a, ptr %b) nounwind {
; X86-LABEL: t2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psllw %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psllw %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)

define i64 @t3(ptr %a, ptr %b) nounwind {
; X86-LABEL: t3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psrlw %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psrlw %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)

define i64 @t4(ptr %a, ptr %b) nounwind {
; X86-LABEL: t4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    pslld %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t4:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    pslld %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)

define i64 @t5(ptr %a, ptr %b) nounwind {
; X86-LABEL: t5:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psrld %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t5:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psrld %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)

define i64 @t6(ptr %a, ptr %b) nounwind {
; X86-LABEL: t6:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psraw %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t6:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psraw %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)

define i64 @t7(ptr %a, ptr %b) nounwind {
; X86-LABEL: t7:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psrad %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t7:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psrad %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = load x86_mmx, ptr %a, align 8
  %1 = load i32, ptr %b, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %0, i32 %1)
  %3 = bitcast x86_mmx %2 to i64
  ret i64 %3
}
declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)
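
; tt0 through tt8 take one MMX operand as a function argument (in %mm0) and
; load the other from memory; the checks verify that the load is folded into
; the memory-operand form of each instruction (e.g. paddb (%eax), %mm0).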
define i64 @tt0(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt0:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddb (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt0:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddb (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()

define i64 @tt1(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddw (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddw (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)

define i64 @tt2(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddd (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddd (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)

define i64 @tt3(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddq (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddq (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

define i64 @tt4(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddusb (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt4:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddusb (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)

define i64 @tt5(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt5:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    paddusw (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt5:
; X64:       # %bb.0: # %entry
; X64-NEXT:    paddusw (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)

define i64 @tt6(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt6:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    psrlw (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt6:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrlw (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)

define i64 @tt7(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt7:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    psrld (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt7:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrld (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)

define i64 @tt8(x86_mmx %t, ptr %q) nounwind {
; X86-LABEL: tt8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    psrlq (%eax), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    emms
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: tt8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrlq (%rdi), %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %v = load x86_mmx, ptr %q
  %u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
  %s = bitcast x86_mmx %u to i64
  call void @llvm.x86.mmx.emms()
  ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
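
; The shift amount comes from a volatile load, which must not be folded or
; duplicated: the checks expect the stored value to be reloaded exactly once
; (movd) and then fed to psrlq in register form.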
define void @test_psrlq_by_volatile_shift_amount(ptr %t) nounwind {
; X86-LABEL: test_psrlq_by_volatile_shift_amount:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $1, (%esp)
; X86-NEXT:    movl $255, %ecx
; X86-NEXT:    movd %ecx, %mm0
; X86-NEXT:    movd (%esp), %mm1
; X86-NEXT:    psrlq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%eax)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_psrlq_by_volatile_shift_amount:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl $1, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movl $255, %eax
; X64-NEXT:    movd %eax, %mm0
; X64-NEXT:    movd -{{[0-9]+}}(%rsp), %mm1
; X64-NEXT:    psrlq %mm1, %mm0
; X64-NEXT:    movq %mm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = alloca i32, align 4
  call void @llvm.lifetime.start(i64 4, ptr nonnull %0)
  store volatile i32 1, ptr %0, align 4
  %1 = load volatile i32, ptr %0, align 4
  %2 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx bitcast (<1 x i64> <i64 255> to x86_mmx), i32 %1)
  store x86_mmx %2, ptr %t, align 8
  call void @llvm.lifetime.end(i64 4, ptr nonnull %0)
  ret void
}

declare void @llvm.lifetime.start(i64, ptr nocapture)
declare void @llvm.lifetime.end(i64, ptr nocapture)

; Make sure we shrink this vector load and fold it.
define x86_mmx @vec_load(ptr %x) {
; X86-LABEL: vec_load:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pshufw $68, (%eax), %mm0 # mm0 = mem[0,1,0,1]
; X86-NEXT:    paddsb %mm0, %mm0
; X86-NEXT:    retl
;
; X64-LABEL: vec_load:
; X64:       # %bb.0:
; X64-NEXT:    pshufw $68, (%rdi), %mm0 # mm0 = mem[0,1,0,1]
; X64-NEXT:    paddsb %mm0, %mm0
; X64-NEXT:    movq2dq %mm0, %xmm0
; X64-NEXT:    retq
  %z = load <4 x float>, ptr %x
  %y = extractelement <4 x float> %z, i32 0
  %a = insertelement <2 x float> undef, float %y, i32 0
  %b = insertelement <2 x float> %a, float %y, i32 1
  %c = bitcast <2 x float> %b to x86_mmx
  %d = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %c, x86_mmx %c)
  ret x86_mmx %d
}

declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)