; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86,X86-NOBMI
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=X86,X86-BMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64,X64-NOBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=X64,X64-BMI
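; foo returns the upper 64 bits of the full 128-bit product of %x and %y
; (a 64x64->128 multiply followed by a shift right by 64).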
define i64 @foo(i64 %x, i64 %y) nounwind {
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %ebx, %esi
; X86-NEXT: adcl $0, %ebp
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: addl %esi, %eax
; X86-NEXT: adcl %ebp, %ebx
; X86-NEXT: movzbl %al, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %eax, %ebp
; X86-NEXT: addl %ebx, %ebp
; X86-NEXT: adcl %ecx, %esi
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %edx, %edi
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %ebx, %eax
; X86-NEXT: adcl %edi, %edx
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: adcl %esi, %edx
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rdx, %rax
  %tmp0 = zext i64 %x to i128
  %tmp1 = zext i64 %y to i128
  %tmp2 = mul i128 %tmp0, %tmp1
  %tmp7 = zext i32 64 to i128
  %tmp3 = lshr i128 %tmp2, %tmp7
  %tmp4 = trunc i128 %tmp3 to i64
  ret i64 %tmp4
}
; <rdar://problem/14096009> superfluous multiply by high part of zero-extended value.
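; mul1 stores z[i] = low 64 bits of (x[i] * y + carry) and propagates the
; upper 64 bits of each 128-bit partial result as the carry into the next
; iteration.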
define i64 @mul1(i64 %n, i64* nocapture %z, i64* nocapture %x, i64 %y) nounwind {
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $28, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: je .LBB1_3
; X86-NEXT: # %bb.1: # %for.body.preheader
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB1_2: # %for.body
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl (%eax,%ebx,8), %ebp
; X86-NEXT: movl 4(%eax,%ebx,8), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %edi, %esi
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: mull %edx
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: addl %esi, %edi
; X86-NEXT: adcl %ecx, %ebp
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %ebp, %esi
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT: adcl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: mull %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: mull %edx
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: addl %esi, %eax
; X86-NEXT: adcl %ecx, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: adcl $0, %eax
; X86-NEXT: adcl $0, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %esi, (%ecx,%ebx,8)
; X86-NEXT: movl %edi, 4(%ecx,%ebx,8)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: addl $1, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $0, %esi
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: xorl %ebp, %ecx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl %edi, %esi
; X86-NEXT: orl %ecx, %esi
; X86-NEXT: jne .LBB1_2
; X86-NEXT: .LBB1_3: # %for.end
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: addl $28, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X64: # %bb.0: # %entry
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: je .LBB1_3
; X64-NEXT: # %bb.1: # %for.body.preheader
; X64-NEXT: movq %rcx, %r8
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: xorl %r10d, %r10d
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_2: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq (%r9,%rcx,8)
; X64-NEXT: addq %r10, %rax
; X64-NEXT: adcq $0, %rdx
; X64-NEXT: movq %rax, (%rsi,%rcx,8)
; X64-NEXT: incq %rcx
; X64-NEXT: cmpq %rcx, %rdi
; X64-NEXT: movq %rdx, %r10
; X64-NEXT: jne .LBB1_2
; X64-NEXT: .LBB1_3: # %for.end
; X64-NEXT: xorl %eax, %eax
entry:
  %conv = zext i64 %y to i128
  %cmp11 = icmp eq i64 %n, 0
  br i1 %cmp11, label %for.end, label %for.body

for.body: ; preds = %entry, %for.body
  %carry.013 = phi i64 [ %conv6, %for.body ], [ 0, %entry ]
  %i.012 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, i64* %x, i64 %i.012
  %0 = load i64, i64* %arrayidx, align 8
  %conv2 = zext i64 %0 to i128
  %mul = mul i128 %conv2, %conv
  %conv3 = zext i64 %carry.013 to i128
  %add = add i128 %mul, %conv3
  %conv4 = trunc i128 %add to i64
  %arrayidx5 = getelementptr inbounds i64, i64* %z, i64 %i.012
  store i64 %conv4, i64* %arrayidx5, align 8
  %shr = lshr i128 %add, 64
  %conv6 = trunc i128 %shr to i64
  %inc = add i64 %i.012, 1
  %exitcond = icmp eq i64 %inc, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret i64 0
}