; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X64
; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X86
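
; Both tests lower a 'select' between x86_mmx values that then feeds a
; variable MMX shift (psllw). test47 selects between two constants,
; test49 between two function arguments.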
; From source: clang -O2
; __m64 test47(int a)
; {
;     __m64 x = (a)? (__m64)(7): (__m64)(0);
;     return __builtin_ia32_psllw(x, x);
; }
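;
; The select arms are the MMX constants 7 and 0, so each branch below is
; expected to materialize its value directly in an MMX register: pxor for
; zero, movl $7 + movd for seven, before the variable psllw shift.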

define i64 @test47(i64 %arg) {
; X64-LABEL: test47:
; X64:       # %bb.0:
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    je .LBB0_1
; X64-NEXT:  # %bb.2:
; X64-NEXT:    pxor %mm0, %mm0
; X64-NEXT:    jmp .LBB0_3
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    movl $7, %eax
; X64-NEXT:    movd %eax, %mm0
; X64-NEXT:  .LBB0_3:
; X64-NEXT:    psllw %mm0, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
;
; X86-LABEL: test47:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    orl 12(%ebp), %eax
; X86-NEXT:    je .LBB0_1
; X86-NEXT:  # %bb.2:
; X86-NEXT:    pxor %mm0, %mm0
; X86-NEXT:    jmp .LBB0_3
; X86-NEXT:  .LBB0_1:
; X86-NEXT:    movl $7, %eax
; X86-NEXT:    movd %eax, %mm0
; X86-NEXT:  .LBB0_3:
; X86-NEXT:    psllw %mm0, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
  %cond = icmp eq i64 %arg, 0
  %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx)
  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
  %retc = bitcast x86_mmx %psll to i64
  ret i64 %retc
}
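
; Note that x86_mmx has no literal constant form, so the constant select
; arms above are spelled as constant-expression bitcasts from i64.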

; From source: clang -O2
; __m64 test49(int a, long long n, long long m)
; {
;     __m64 x = (a)? (__m64)(n): (__m64)(m);
;     return __builtin_ia32_psllw(x, x);
; }
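;
; Unlike test47, both select arms here are variable. On X64 they can be
; moved straight from GPRs into an MMX register (movq %rsi/%rdx, %mm0);
; on X86 the 64-bit arguments live on the stack, so each branch computes
; the chosen argument's address (leal) and a single movq loads it.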

define i64 @test49(i64 %arg, i64 %x, i64 %y) {
; X64-LABEL: test49:
; X64:       # %bb.0:
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    je .LBB1_1
; X64-NEXT:  # %bb.2:
; X64-NEXT:    movq %rdx, %mm0
; X64-NEXT:    jmp .LBB1_3
; X64-NEXT:  .LBB1_1:
; X64-NEXT:    movq %rsi, %mm0
; X64-NEXT:  .LBB1_3:
; X64-NEXT:    psllw %mm0, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
;
; X86-LABEL: test49:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    orl 12(%ebp), %eax
; X86-NEXT:    je .LBB1_1
; X86-NEXT:  # %bb.2:
; X86-NEXT:    leal 24(%ebp), %eax
; X86-NEXT:    jmp .LBB1_3
; X86-NEXT:  .LBB1_1:
; X86-NEXT:    leal 16(%ebp), %eax
; X86-NEXT:  .LBB1_3:
; X86-NEXT:    movq (%eax), %mm0
; X86-NEXT:    psllw %mm0, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
  %cond = icmp eq i64 %arg, 0
  %xmmx = bitcast i64 %x to x86_mmx
  %ymmx = bitcast i64 %y to x86_mmx
  %slct = select i1 %cond, x86_mmx %xmmx, x86_mmx %ymmx
  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
  %retc = bitcast x86_mmx %psll to i64
  ret i64 %retc
}

declare x86_mmx @llvm.x86.mmx.psll.w(x86_mmx, x86_mmx)
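
; The intrinsic takes the shift count as a second x86_mmx operand,
; matching the register form of psllw; that is why both tests shift
; %mm0 by itself.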