1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=CHECK,X86
3 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=CHECK,X64
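; Check 128-bit and <2 x i128> shift lowering (shl, lshr, ashr) on x86-32 and
; x86-64, including constant shift amounts that are out of range.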
9 define void @test_lshr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
10 ; X86-LABEL: test_lshr_i128:
11 ; X86: # %bb.0: # %entry
12 ; X86-NEXT: pushl %ebp
13 ; X86-NEXT: pushl %ebx
14 ; X86-NEXT: pushl %edi
15 ; X86-NEXT: pushl %esi
16 ; X86-NEXT: subl $20, %esp
17 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
18 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
19 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
20 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
21 ; X86-NEXT: movl %ebp, %esi
22 ; X86-NEXT: movl %eax, %ecx
23 ; X86-NEXT: shrdl %cl, %edi, %esi
24 ; X86-NEXT: shrl %cl, %edx
25 ; X86-NEXT: shrl %cl, %edi
26 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
27 ; X86-NEXT: testb $32, %al
28 ; X86-NEXT: jne .LBB0_1
29 ; X86-NEXT: # %bb.2: # %entry
30 ; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB0_3
; X86-NEXT: .LBB0_1:
33 ; X86-NEXT: movl %edi, %esi
34 ; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
35 ; X86-NEXT: xorl %edi, %edi
36 ; X86-NEXT: .LBB0_3: # %entry
37 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
38 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
39 ; X86-NEXT: movl %eax, %edx
40 ; X86-NEXT: subb $64, %dl
41 ; X86-NEXT: jb .LBB0_5
42 ; X86-NEXT: # %bb.4: # %entry
43 ; X86-NEXT: xorl %edi, %edi
44 ; X86-NEXT: .LBB0_5: # %entry
45 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
47 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
48 ; X86-NEXT: movl %edx, %ecx
49 ; X86-NEXT: shldl %cl, %ebp, %edi
50 ; X86-NEXT: movl %ebp, %esi
51 ; X86-NEXT: shll %cl, %esi
52 ; X86-NEXT: testb $32, %dl
53 ; X86-NEXT: movl %esi, %ebx
54 ; X86-NEXT: jne .LBB0_7
55 ; X86-NEXT: # %bb.6: # %entry
56 ; X86-NEXT: movl %edi, %ebx
57 ; X86-NEXT: .LBB0_7: # %entry
58 ; X86-NEXT: movb %al, %ah
59 ; X86-NEXT: addb $-64, %ah
60 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
61 ; X86-NEXT: movb %ah, %cl
62 ; X86-NEXT: shrl %cl, %edi
63 ; X86-NEXT: testb $32, %ah
64 ; X86-NEXT: movl $0, %ecx
65 ; X86-NEXT: jne .LBB0_9
66 ; X86-NEXT: # %bb.8: # %entry
67 ; X86-NEXT: movl %edi, %ecx
68 ; X86-NEXT: .LBB0_9: # %entry
69 ; X86-NEXT: cmpb $64, %al
70 ; X86-NEXT: jb .LBB0_10
71 ; X86-NEXT: # %bb.11: # %entry
72 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: jmp .LBB0_12
; X86-NEXT: .LBB0_10:
75 ; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
76 ; X86-NEXT: orl %ebx, %ecx
77 ; X86-NEXT: .LBB0_12: # %entry
78 ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
79 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
80 ; X86-NEXT: testb $32, %dl
81 ; X86-NEXT: jne .LBB0_14
82 ; X86-NEXT: # %bb.13: # %entry
83 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
84 ; X86-NEXT: .LBB0_14: # %entry
85 ; X86-NEXT: movl %ebx, %edx
86 ; X86-NEXT: movl %eax, %ecx
87 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
88 ; X86-NEXT: shrdl %cl, %esi, %edx
89 ; X86-NEXT: testb $32, %al
90 ; X86-NEXT: jne .LBB0_16
91 ; X86-NEXT: # %bb.15: # %entry
92 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
93 ; X86-NEXT: .LBB0_16: # %entry
94 ; X86-NEXT: movb %ah, %cl
95 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
96 ; X86-NEXT: shrdl %cl, %edx, %ebp
97 ; X86-NEXT: testb $32, %ah
98 ; X86-NEXT: jne .LBB0_18
99 ; X86-NEXT: # %bb.17: # %entry
100 ; X86-NEXT: movl %ebp, %edi
101 ; X86-NEXT: .LBB0_18: # %entry
102 ; X86-NEXT: cmpb $64, %al
103 ; X86-NEXT: jae .LBB0_20
104 ; X86-NEXT: # %bb.19:
105 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
106 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
107 ; X86-NEXT: .LBB0_20: # %entry
108 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
109 ; X86-NEXT: testb %al, %al
110 ; X86-NEXT: je .LBB0_22
111 ; X86-NEXT: # %bb.21: # %entry
112 ; X86-NEXT: movl %edi, %ebx
113 ; X86-NEXT: movl (%esp), %esi # 4-byte Reload
114 ; X86-NEXT: .LBB0_22: # %entry
115 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
116 ; X86-NEXT: movl %eax, 12(%ecx)
117 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
118 ; X86-NEXT: movl %eax, 8(%ecx)
119 ; X86-NEXT: movl %esi, 4(%ecx)
120 ; X86-NEXT: movl %ebx, (%ecx)
121 ; X86-NEXT: addl $20, %esp
122 ; X86-NEXT: popl %esi
123 ; X86-NEXT: popl %edi
124 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
128 ; X64-LABEL: test_lshr_i128:
129 ; X64: # %bb.0: # %entry
130 ; X64-NEXT: movq %rdx, %rcx
131 ; X64-NEXT: shrdq %cl, %rsi, %rdi
132 ; X64-NEXT: shrq %cl, %rsi
133 ; X64-NEXT: xorl %eax, %eax
134 ; X64-NEXT: testb $64, %cl
135 ; X64-NEXT: cmovneq %rsi, %rdi
136 ; X64-NEXT: cmoveq %rsi, %rax
137 ; X64-NEXT: movq %rax, 8(%r8)
; X64-NEXT: movq %rdi, (%r8)
; X64-NEXT: retq
entry:
141 %0 = lshr i128 %x, %a
store i128 %0, i128* %r, align 16
ret void
}
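; The ashr lowering below mirrors the lshr case above, except that the high half
; is shifted with sarl/sarq and the out-of-range fill value comes from the sign
; (sarl $31 on X86, sarq $63 on X64) instead of zero.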
146 define void @test_ashr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
147 ; X86-LABEL: test_ashr_i128:
148 ; X86: # %bb.0: # %entry
149 ; X86-NEXT: pushl %ebp
150 ; X86-NEXT: pushl %ebx
151 ; X86-NEXT: pushl %edi
152 ; X86-NEXT: pushl %esi
153 ; X86-NEXT: subl $24, %esp
154 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
155 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
156 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
157 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
158 ; X86-NEXT: movl %ebp, %esi
159 ; X86-NEXT: movl %eax, %ecx
160 ; X86-NEXT: shrdl %cl, %ebx, %esi
161 ; X86-NEXT: shrl %cl, %edx
162 ; X86-NEXT: movl %ebx, %edi
163 ; X86-NEXT: sarl %cl, %edi
164 ; X86-NEXT: sarl $31, %ebx
165 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
166 ; X86-NEXT: testb $32, %al
167 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
168 ; X86-NEXT: jne .LBB1_1
169 ; X86-NEXT: # %bb.2: # %entry
170 ; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB1_3
; X86-NEXT: .LBB1_1:
173 ; X86-NEXT: movl %edi, %esi
174 ; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
175 ; X86-NEXT: movl %ebx, %edi
176 ; X86-NEXT: .LBB1_3: # %entry
177 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
178 ; X86-NEXT: movl %eax, %edx
179 ; X86-NEXT: subb $64, %dl
180 ; X86-NEXT: jb .LBB1_5
181 ; X86-NEXT: # %bb.4: # %entry
182 ; X86-NEXT: movl %ebx, %edi
183 ; X86-NEXT: .LBB1_5: # %entry
184 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
186 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
187 ; X86-NEXT: movl %edx, %ecx
188 ; X86-NEXT: shldl %cl, %ebp, %edi
189 ; X86-NEXT: movl %ebp, %esi
190 ; X86-NEXT: shll %cl, %esi
191 ; X86-NEXT: testb $32, %dl
192 ; X86-NEXT: movl %esi, %ecx
193 ; X86-NEXT: jne .LBB1_7
194 ; X86-NEXT: # %bb.6: # %entry
195 ; X86-NEXT: movl %edi, %ecx
196 ; X86-NEXT: .LBB1_7: # %entry
197 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
198 ; X86-NEXT: movb %al, %ah
199 ; X86-NEXT: addb $-64, %ah
200 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
201 ; X86-NEXT: movb %ah, %cl
202 ; X86-NEXT: sarl %cl, %edi
203 ; X86-NEXT: testb $32, %ah
204 ; X86-NEXT: movl %ebx, %ecx
205 ; X86-NEXT: jne .LBB1_9
206 ; X86-NEXT: # %bb.8: # %entry
207 ; X86-NEXT: movl %edi, %ecx
208 ; X86-NEXT: .LBB1_9: # %entry
209 ; X86-NEXT: cmpb $64, %al
210 ; X86-NEXT: jb .LBB1_10
211 ; X86-NEXT: # %bb.11: # %entry
212 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
213 ; X86-NEXT: jmp .LBB1_12
214 ; X86-NEXT: .LBB1_10:
215 ; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
216 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
217 ; X86-NEXT: .LBB1_12: # %entry
218 ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
219 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
220 ; X86-NEXT: testb $32, %dl
221 ; X86-NEXT: jne .LBB1_14
222 ; X86-NEXT: # %bb.13: # %entry
223 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
224 ; X86-NEXT: .LBB1_14: # %entry
225 ; X86-NEXT: movl %ebx, %edx
226 ; X86-NEXT: movl %eax, %ecx
227 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
228 ; X86-NEXT: shrdl %cl, %esi, %edx
229 ; X86-NEXT: testb $32, %al
230 ; X86-NEXT: jne .LBB1_16
231 ; X86-NEXT: # %bb.15: # %entry
232 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
233 ; X86-NEXT: .LBB1_16: # %entry
234 ; X86-NEXT: movb %ah, %cl
235 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
236 ; X86-NEXT: shrdl %cl, %edx, %ebp
237 ; X86-NEXT: testb $32, %ah
238 ; X86-NEXT: jne .LBB1_18
239 ; X86-NEXT: # %bb.17: # %entry
240 ; X86-NEXT: movl %ebp, %edi
241 ; X86-NEXT: .LBB1_18: # %entry
242 ; X86-NEXT: cmpb $64, %al
243 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
244 ; X86-NEXT: jae .LBB1_20
245 ; X86-NEXT: # %bb.19:
246 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
247 ; X86-NEXT: movl %ecx, %edi
248 ; X86-NEXT: .LBB1_20: # %entry
249 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
250 ; X86-NEXT: testb %al, %al
251 ; X86-NEXT: je .LBB1_22
252 ; X86-NEXT: # %bb.21: # %entry
253 ; X86-NEXT: movl %edi, %ebx
254 ; X86-NEXT: movl (%esp), %esi # 4-byte Reload
255 ; X86-NEXT: .LBB1_22: # %entry
256 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
257 ; X86-NEXT: movl %eax, 12(%ecx)
258 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
259 ; X86-NEXT: movl %eax, 8(%ecx)
260 ; X86-NEXT: movl %esi, 4(%ecx)
261 ; X86-NEXT: movl %ebx, (%ecx)
262 ; X86-NEXT: addl $24, %esp
263 ; X86-NEXT: popl %esi
264 ; X86-NEXT: popl %edi
265 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
269 ; X64-LABEL: test_ashr_i128:
270 ; X64: # %bb.0: # %entry
271 ; X64-NEXT: movq %rdx, %rcx
272 ; X64-NEXT: shrdq %cl, %rsi, %rdi
273 ; X64-NEXT: movq %rsi, %rax
274 ; X64-NEXT: sarq %cl, %rax
275 ; X64-NEXT: sarq $63, %rsi
276 ; X64-NEXT: testb $64, %cl
277 ; X64-NEXT: cmovneq %rax, %rdi
278 ; X64-NEXT: cmoveq %rax, %rsi
279 ; X64-NEXT: movq %rsi, 8(%r8)
; X64-NEXT: movq %rdi, (%r8)
; X64-NEXT: retq
entry:
283 %0 = ashr i128 %x, %a
store i128 %0, i128* %r, align 16
ret void
}
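; The shl lowering is the mirrored pattern: shll/shldl pairs on X86, and
; shldq/shlq plus a cmov keyed on bit 64 of the shift amount on X64.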
288 define void @test_shl_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
289 ; X86-LABEL: test_shl_i128:
290 ; X86: # %bb.0: # %entry
291 ; X86-NEXT: pushl %ebp
292 ; X86-NEXT: pushl %ebx
293 ; X86-NEXT: pushl %edi
294 ; X86-NEXT: pushl %esi
295 ; X86-NEXT: subl $20, %esp
296 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
297 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
298 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
299 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
300 ; X86-NEXT: movl %ecx, %ebx
301 ; X86-NEXT: movl %eax, %ecx
302 ; X86-NEXT: shll %cl, %ebx
303 ; X86-NEXT: movl %ebp, %esi
304 ; X86-NEXT: shll %cl, %esi
305 ; X86-NEXT: movl %edi, %edx
306 ; X86-NEXT: shldl %cl, %ebp, %edx
307 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
308 ; X86-NEXT: testb $32, %al
309 ; X86-NEXT: jne .LBB2_1
310 ; X86-NEXT: # %bb.2: # %entry
311 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
312 ; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB2_3
; X86-NEXT: .LBB2_1:
315 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
316 ; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
317 ; X86-NEXT: xorl %esi, %esi
318 ; X86-NEXT: .LBB2_3: # %entry
319 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
320 ; X86-NEXT: movl %eax, %edx
321 ; X86-NEXT: subb $64, %dl
322 ; X86-NEXT: jb .LBB2_5
323 ; X86-NEXT: # %bb.4: # %entry
324 ; X86-NEXT: xorl %esi, %esi
325 ; X86-NEXT: .LBB2_5: # %entry
326 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
328 ; X86-NEXT: movl %edi, %esi
329 ; X86-NEXT: movl %edx, %ecx
330 ; X86-NEXT: shrl %cl, %esi
331 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
332 ; X86-NEXT: shrdl %cl, %edi, %ebx
333 ; X86-NEXT: testb $32, %dl
334 ; X86-NEXT: movl %esi, %ebp
335 ; X86-NEXT: jne .LBB2_7
336 ; X86-NEXT: # %bb.6: # %entry
337 ; X86-NEXT: movl %ebx, %ebp
338 ; X86-NEXT: .LBB2_7: # %entry
339 ; X86-NEXT: movb %al, %ah
340 ; X86-NEXT: addb $-64, %ah
341 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
342 ; X86-NEXT: movb %ah, %cl
343 ; X86-NEXT: shll %cl, %ebx
344 ; X86-NEXT: testb $32, %ah
345 ; X86-NEXT: movl $0, %ecx
346 ; X86-NEXT: jne .LBB2_9
347 ; X86-NEXT: # %bb.8: # %entry
348 ; X86-NEXT: movl %ebx, %ecx
349 ; X86-NEXT: .LBB2_9: # %entry
350 ; X86-NEXT: cmpb $64, %al
351 ; X86-NEXT: jb .LBB2_10
352 ; X86-NEXT: # %bb.11: # %entry
353 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
354 ; X86-NEXT: jmp .LBB2_12
355 ; X86-NEXT: .LBB2_10:
356 ; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
357 ; X86-NEXT: orl %ebp, %ecx
358 ; X86-NEXT: .LBB2_12: # %entry
359 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
360 ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
361 ; X86-NEXT: testb $32, %dl
362 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
363 ; X86-NEXT: jne .LBB2_14
364 ; X86-NEXT: # %bb.13: # %entry
365 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
366 ; X86-NEXT: .LBB2_14: # %entry
367 ; X86-NEXT: movl %edx, %esi
368 ; X86-NEXT: movl %eax, %ecx
369 ; X86-NEXT: shldl %cl, %ebp, %esi
370 ; X86-NEXT: testb $32, %al
371 ; X86-NEXT: jne .LBB2_16
372 ; X86-NEXT: # %bb.15: # %entry
373 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
374 ; X86-NEXT: .LBB2_16: # %entry
375 ; X86-NEXT: movb %ah, %cl
376 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
377 ; X86-NEXT: shldl %cl, %esi, %edi
378 ; X86-NEXT: testb $32, %ah
379 ; X86-NEXT: jne .LBB2_18
380 ; X86-NEXT: # %bb.17: # %entry
381 ; X86-NEXT: movl %edi, %ebx
382 ; X86-NEXT: .LBB2_18: # %entry
383 ; X86-NEXT: cmpb $64, %al
384 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
385 ; X86-NEXT: jae .LBB2_20
386 ; X86-NEXT: # %bb.19:
387 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
388 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
389 ; X86-NEXT: .LBB2_20: # %entry
390 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
391 ; X86-NEXT: testb %al, %al
392 ; X86-NEXT: je .LBB2_22
393 ; X86-NEXT: # %bb.21: # %entry
394 ; X86-NEXT: movl %ebx, %edx
395 ; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
396 ; X86-NEXT: .LBB2_22: # %entry
397 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
398 ; X86-NEXT: movl %eax, 4(%ecx)
399 ; X86-NEXT: movl %esi, (%ecx)
400 ; X86-NEXT: movl %edx, 12(%ecx)
401 ; X86-NEXT: movl %ebp, 8(%ecx)
402 ; X86-NEXT: addl $20, %esp
403 ; X86-NEXT: popl %esi
404 ; X86-NEXT: popl %edi
405 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
409 ; X64-LABEL: test_shl_i128:
410 ; X64: # %bb.0: # %entry
411 ; X64-NEXT: movq %rdx, %rcx
412 ; X64-NEXT: shldq %cl, %rdi, %rsi
413 ; X64-NEXT: shlq %cl, %rdi
414 ; X64-NEXT: xorl %eax, %eax
415 ; X64-NEXT: testb $64, %cl
416 ; X64-NEXT: cmovneq %rdi, %rsi
417 ; X64-NEXT: cmoveq %rdi, %rax
418 ; X64-NEXT: movq %rsi, 8(%r8)
; X64-NEXT: movq %rax, (%r8)
; X64-NEXT: retq
entry:
%0 = shl i128 %x, %a
store i128 %0, i128* %r, align 16
ret void
}
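; A constant shift amount of -1 is out of range for i128, so the result is
; poison; no shift or store is expected, just a return.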
427 define void @test_lshr_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
428 ; CHECK-LABEL: test_lshr_i128_outofrange:
429 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
432 %0 = lshr i128 %x, -1
store i128 %0, i128* %r, align 16
ret void
}
437 define void @test_ashr_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
438 ; CHECK-LABEL: test_ashr_i128_outofrange:
439 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
442 %0 = ashr i128 %x, -1
store i128 %0, i128* %r, align 16
ret void
}
447 define void @test_shl_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
448 ; CHECK-LABEL: test_shl_i128_outofrange:
449 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = shl i128 %x, -1
store i128 %0, i128* %r, align 16
ret void
}
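; The <2 x i128> shifts are scalarized into two copies of the i128 sequences
; checked above.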
461 define void @test_lshr_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
462 ; X86-LABEL: test_lshr_v2i128:
463 ; X86: # %bb.0: # %entry
464 ; X86-NEXT: pushl %ebp
465 ; X86-NEXT: pushl %ebx
466 ; X86-NEXT: pushl %edi
467 ; X86-NEXT: pushl %esi
468 ; X86-NEXT: subl $68, %esp
469 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
470 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
471 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
472 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
473 ; X86-NEXT: movl %ebx, %edi
474 ; X86-NEXT: movl %eax, %ecx
475 ; X86-NEXT: shrl %cl, %edi
476 ; X86-NEXT: movl %esi, %ebp
477 ; X86-NEXT: shrl %cl, %ebp
478 ; X86-NEXT: shrdl %cl, %esi, %edx
479 ; X86-NEXT: testb $32, %al
480 ; X86-NEXT: jne .LBB6_1
481 ; X86-NEXT: # %bb.2: # %entry
482 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
483 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
484 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB6_3
; X86-NEXT: .LBB6_1:
487 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
488 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
489 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
490 ; X86-NEXT: .LBB6_3: # %entry
491 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
492 ; X86-NEXT: movl %eax, %ecx
493 ; X86-NEXT: shrdl %cl, %ebx, %esi
494 ; X86-NEXT: testb $32, %al
495 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
496 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
497 ; X86-NEXT: jne .LBB6_5
498 ; X86-NEXT: # %bb.4: # %entry
499 ; X86-NEXT: movl %esi, %edi
500 ; X86-NEXT: .LBB6_5: # %entry
501 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
502 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
503 ; X86-NEXT: movl %edx, %ecx
504 ; X86-NEXT: shrl %cl, %ebx
505 ; X86-NEXT: shrl %cl, %ebp
506 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
507 ; X86-NEXT: movl %edx, %ecx
508 ; X86-NEXT: subl $64, %ecx
509 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
510 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
511 ; X86-NEXT: sbbl $0, %ecx
512 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
513 ; X86-NEXT: sbbl $0, %ecx
514 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
515 ; X86-NEXT: sbbl $0, %ecx
516 ; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
517 ; X86-NEXT: testb $32, %dl
518 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
519 ; X86-NEXT: movl $0, %ecx
520 ; X86-NEXT: jne .LBB6_7
521 ; X86-NEXT: # %bb.6: # %entry
522 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
523 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
524 ; X86-NEXT: movl %ebx, %ecx
525 ; X86-NEXT: .LBB6_7: # %entry
526 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
527 ; X86-NEXT: movl %edx, %ecx
528 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
529 ; X86-NEXT: shrdl %cl, %ebp, %esi
530 ; X86-NEXT: testb $32, %dl
531 ; X86-NEXT: jne .LBB6_9
532 ; X86-NEXT: # %bb.8: # %entry
533 ; X86-NEXT: movl %esi, %ebx
534 ; X86-NEXT: .LBB6_9: # %entry
535 ; X86-NEXT: movl %edi, %esi
536 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
537 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
538 ; X86-NEXT: movl %ecx, %ebp
539 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
540 ; X86-NEXT: shrl %cl, %ebp
541 ; X86-NEXT: testb $32, %cl
542 ; X86-NEXT: movl $0, %ecx
543 ; X86-NEXT: jne .LBB6_11
544 ; X86-NEXT: # %bb.10: # %entry
545 ; X86-NEXT: movl %ebp, %ecx
546 ; X86-NEXT: .LBB6_11: # %entry
547 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
548 ; X86-NEXT: movb $64, %cl
549 ; X86-NEXT: subb %dl, %cl
550 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
551 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
552 ; X86-NEXT: shldl %cl, %ebx, %edi
553 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
554 ; X86-NEXT: movl %ebx, %edi
555 ; X86-NEXT: shll %cl, %edi
556 ; X86-NEXT: testb $32, %cl
557 ; X86-NEXT: movb $64, %bl
558 ; X86-NEXT: jne .LBB6_12
559 ; X86-NEXT: # %bb.13: # %entry
560 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
561 ; X86-NEXT: jmp .LBB6_14
562 ; X86-NEXT: .LBB6_12:
563 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
564 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
565 ; X86-NEXT: .LBB6_14: # %entry
566 ; X86-NEXT: movl %esi, %edi
567 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
568 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
569 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
570 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
571 ; X86-NEXT: movl %edx, %ecx
572 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
573 ; X86-NEXT: shrdl %cl, %ebp, %esi
574 ; X86-NEXT: testb $32, %dl
575 ; X86-NEXT: jne .LBB6_16
576 ; X86-NEXT: # %bb.15: # %entry
577 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
578 ; X86-NEXT: .LBB6_16: # %entry
579 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
580 ; X86-NEXT: subb %al, %bl
581 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
582 ; X86-NEXT: movl %ebx, %ecx
583 ; X86-NEXT: shll %cl, %ebp
584 ; X86-NEXT: testb $32, %bl
585 ; X86-NEXT: movl $0, %ecx
586 ; X86-NEXT: jne .LBB6_18
587 ; X86-NEXT: # %bb.17: # %entry
588 ; X86-NEXT: movl %ebp, %ecx
589 ; X86-NEXT: .LBB6_18: # %entry
590 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
591 ; X86-NEXT: movl %eax, %ecx
592 ; X86-NEXT: subl $64, %ecx
593 ; X86-NEXT: sbbl $0, %esi
594 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
595 ; X86-NEXT: sbbl $0, %esi
596 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
597 ; X86-NEXT: sbbl $0, %esi
598 ; X86-NEXT: setae %bh
599 ; X86-NEXT: jb .LBB6_20
600 ; X86-NEXT: # %bb.19: # %entry
601 ; X86-NEXT: xorl %edi, %edi
602 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
603 ; X86-NEXT: .LBB6_20: # %entry
604 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
605 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
606 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
607 ; X86-NEXT: shrdl %cl, %esi, %edi
608 ; X86-NEXT: shrl %cl, %esi
609 ; X86-NEXT: testb $32, %cl
610 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
611 ; X86-NEXT: jne .LBB6_22
612 ; X86-NEXT: # %bb.21: # %entry
613 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
614 ; X86-NEXT: .LBB6_22: # %entry
615 ; X86-NEXT: testb %bh, %bh
616 ; X86-NEXT: jne .LBB6_24
617 ; X86-NEXT: # %bb.23:
618 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
619 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
620 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
621 ; X86-NEXT: .LBB6_24: # %entry
622 ; X86-NEXT: testb $32, %cl
623 ; X86-NEXT: movl $0, %ecx
624 ; X86-NEXT: jne .LBB6_26
625 ; X86-NEXT: # %bb.25: # %entry
626 ; X86-NEXT: movl %esi, %ecx
627 ; X86-NEXT: .LBB6_26: # %entry
628 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
629 ; X86-NEXT: movl %ebx, %ecx
630 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
631 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
632 ; X86-NEXT: shldl %cl, %edi, %esi
633 ; X86-NEXT: testb $32, %bl
634 ; X86-NEXT: jne .LBB6_28
635 ; X86-NEXT: # %bb.27: # %entry
636 ; X86-NEXT: movl %esi, %ebp
637 ; X86-NEXT: .LBB6_28: # %entry
638 ; X86-NEXT: testb %bh, %bh
639 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
640 ; X86-NEXT: jne .LBB6_30
641 ; X86-NEXT: # %bb.29:
642 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
643 ; X86-NEXT: orl %ebp, %ecx
644 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
645 ; X86-NEXT: .LBB6_30: # %entry
646 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
647 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
648 ; X86-NEXT: jne .LBB6_32
649 ; X86-NEXT: # %bb.31: # %entry
650 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
651 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
652 ; X86-NEXT: .LBB6_32: # %entry
653 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
654 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
655 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
656 ; X86-NEXT: shrdl %cl, %ebp, %edi
657 ; X86-NEXT: movl %edi, %ebp
658 ; X86-NEXT: testb $32, %cl
659 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
660 ; X86-NEXT: je .LBB6_33
661 ; X86-NEXT: # %bb.34: # %entry
662 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
663 ; X86-NEXT: jne .LBB6_35
664 ; X86-NEXT: .LBB6_36: # %entry
665 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
666 ; X86-NEXT: je .LBB6_38
667 ; X86-NEXT: .LBB6_37:
668 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
669 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
670 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
671 ; X86-NEXT: .LBB6_38: # %entry
672 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
673 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
674 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
675 ; X86-NEXT: orl %ecx, %edx
676 ; X86-NEXT: je .LBB6_40
677 ; X86-NEXT: # %bb.39: # %entry
678 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
679 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
680 ; X86-NEXT: .LBB6_40: # %entry
681 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
682 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
683 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
684 ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
685 ; X86-NEXT: orl %edx, %eax
686 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
687 ; X86-NEXT: je .LBB6_42
688 ; X86-NEXT: # %bb.41: # %entry
689 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
690 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
691 ; X86-NEXT: .LBB6_42: # %entry
692 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
693 ; X86-NEXT: movl %edx, 28(%ecx)
694 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
695 ; X86-NEXT: movl %edx, 24(%ecx)
696 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
697 ; X86-NEXT: movl %edx, 12(%ecx)
698 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
699 ; X86-NEXT: movl %edx, 8(%ecx)
700 ; X86-NEXT: movl %esi, 20(%ecx)
701 ; X86-NEXT: movl %eax, 16(%ecx)
702 ; X86-NEXT: movl %ebx, 4(%ecx)
703 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
704 ; X86-NEXT: movl %eax, (%ecx)
705 ; X86-NEXT: addl $68, %esp
706 ; X86-NEXT: popl %esi
707 ; X86-NEXT: popl %edi
708 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
711 ; X86-NEXT: .LBB6_33: # %entry
712 ; X86-NEXT: movl %ebp, %edi
713 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
714 ; X86-NEXT: je .LBB6_36
715 ; X86-NEXT: .LBB6_35:
716 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
717 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
718 ; X86-NEXT: movl %ecx, %edi
719 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
720 ; X86-NEXT: jne .LBB6_37
; X86-NEXT: jmp .LBB6_38
;
723 ; X64-LABEL: test_lshr_v2i128:
724 ; X64: # %bb.0: # %entry
725 ; X64-NEXT: movq %rcx, %rax
726 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
727 ; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
728 ; X64-NEXT: movl %r9d, %ecx
729 ; X64-NEXT: shrdq %cl, %rax, %rdx
730 ; X64-NEXT: movl %r8d, %ecx
731 ; X64-NEXT: shrdq %cl, %rsi, %rdi
732 ; X64-NEXT: shrq %cl, %rsi
733 ; X64-NEXT: xorl %r11d, %r11d
734 ; X64-NEXT: testb $64, %r8b
735 ; X64-NEXT: cmovneq %rsi, %rdi
736 ; X64-NEXT: cmovneq %r11, %rsi
737 ; X64-NEXT: movl %r9d, %ecx
738 ; X64-NEXT: shrq %cl, %rax
739 ; X64-NEXT: testb $64, %r9b
740 ; X64-NEXT: cmovneq %rax, %rdx
741 ; X64-NEXT: cmovneq %r11, %rax
742 ; X64-NEXT: movq %rax, 24(%r10)
743 ; X64-NEXT: movq %rdx, 16(%r10)
744 ; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
748 %0 = lshr <2 x i128> %x, %a
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
753 define void @test_ashr_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
754 ; X86-LABEL: test_ashr_v2i128:
755 ; X86: # %bb.0: # %entry
756 ; X86-NEXT: pushl %ebp
757 ; X86-NEXT: pushl %ebx
758 ; X86-NEXT: pushl %edi
759 ; X86-NEXT: pushl %esi
760 ; X86-NEXT: subl $80, %esp
761 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
762 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
763 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
764 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
765 ; X86-NEXT: movl %ebp, %ebx
766 ; X86-NEXT: movl %eax, %ecx
767 ; X86-NEXT: sarl %cl, %ebx
768 ; X86-NEXT: movl %esi, %edi
769 ; X86-NEXT: shrl %cl, %edi
770 ; X86-NEXT: shrdl %cl, %esi, %edx
771 ; X86-NEXT: sarl $31, %ebp
772 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
773 ; X86-NEXT: testb $32, %al
774 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
775 ; X86-NEXT: jne .LBB7_1
776 ; X86-NEXT: # %bb.2: # %entry
777 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
778 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
779 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB7_3
; X86-NEXT: .LBB7_1:
782 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
783 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
784 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
785 ; X86-NEXT: .LBB7_3: # %entry
786 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
787 ; X86-NEXT: movl %eax, %ecx
788 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
789 ; X86-NEXT: shrdl %cl, %edx, %edi
790 ; X86-NEXT: testb $32, %al
791 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
792 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
793 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
794 ; X86-NEXT: jne .LBB7_5
795 ; X86-NEXT: # %bb.4: # %entry
796 ; X86-NEXT: movl %edi, %ebx
797 ; X86-NEXT: .LBB7_5: # %entry
798 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
799 ; X86-NEXT: movl %ecx, %ebp
800 ; X86-NEXT: movl %ecx, %edi
801 ; X86-NEXT: movl %edx, %ecx
802 ; X86-NEXT: sarl %cl, %edi
803 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
804 ; X86-NEXT: shrl %cl, %esi
805 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
806 ; X86-NEXT: sarl $31, %ebp
807 ; X86-NEXT: movl %edx, %ecx
808 ; X86-NEXT: subl $64, %ecx
809 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
810 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
811 ; X86-NEXT: sbbl $0, %ecx
812 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
813 ; X86-NEXT: sbbl $0, %ecx
814 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
815 ; X86-NEXT: sbbl $0, %ecx
816 ; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
817 ; X86-NEXT: testb $32, %dl
818 ; X86-NEXT: movl $0, %esi
819 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
820 ; X86-NEXT: movl %ebp, %ecx
821 ; X86-NEXT: jne .LBB7_7
822 ; X86-NEXT: # %bb.6: # %entry
823 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
824 ; X86-NEXT: movl %edi, %ecx
825 ; X86-NEXT: .LBB7_7: # %entry
826 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
827 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
828 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
829 ; X86-NEXT: movl %edx, %ecx
830 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
831 ; X86-NEXT: shrdl %cl, %ebp, %esi
832 ; X86-NEXT: testb $32, %dl
833 ; X86-NEXT: jne .LBB7_9
834 ; X86-NEXT: # %bb.8: # %entry
835 ; X86-NEXT: movl %esi, %edi
836 ; X86-NEXT: .LBB7_9: # %entry
837 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
838 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
839 ; X86-NEXT: movl %ecx, %esi
840 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
841 ; X86-NEXT: sarl %cl, %esi
842 ; X86-NEXT: testb $32, %cl
843 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
844 ; X86-NEXT: jne .LBB7_11
845 ; X86-NEXT: # %bb.10: # %entry
846 ; X86-NEXT: movl %esi, %ecx
847 ; X86-NEXT: .LBB7_11: # %entry
848 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
849 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
850 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
851 ; X86-NEXT: movb $64, %cl
852 ; X86-NEXT: subb %dl, %cl
853 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
854 ; X86-NEXT: movl %ebx, %ebp
855 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
856 ; X86-NEXT: shldl %cl, %ebx, %ebp
857 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
858 ; X86-NEXT: movl %ebx, %ebp
859 ; X86-NEXT: shll %cl, %ebp
860 ; X86-NEXT: testb $32, %cl
861 ; X86-NEXT: movb $64, %bl
862 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
863 ; X86-NEXT: je .LBB7_13
864 ; X86-NEXT: # %bb.12:
865 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
866 ; X86-NEXT: xorl %ebp, %ebp
867 ; X86-NEXT: .LBB7_13: # %entry
868 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
869 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
870 ; X86-NEXT: movl %edx, %ecx
871 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
872 ; X86-NEXT: shrdl %cl, %edi, %esi
873 ; X86-NEXT: testb $32, %dl
874 ; X86-NEXT: jne .LBB7_15
875 ; X86-NEXT: # %bb.14: # %entry
876 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
877 ; X86-NEXT: .LBB7_15: # %entry
878 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
879 ; X86-NEXT: subb %al, %bl
880 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
881 ; X86-NEXT: movl %ebx, %ecx
882 ; X86-NEXT: shll %cl, %ebp
883 ; X86-NEXT: testb $32, %bl
884 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
885 ; X86-NEXT: jne .LBB7_17
886 ; X86-NEXT: # %bb.16: # %entry
887 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
888 ; X86-NEXT: .LBB7_17: # %entry
889 ; X86-NEXT: movl %eax, %ecx
890 ; X86-NEXT: subl $64, %ecx
891 ; X86-NEXT: sbbl $0, %esi
892 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
893 ; X86-NEXT: sbbl $0, %esi
894 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
895 ; X86-NEXT: sbbl $0, %esi
896 ; X86-NEXT: setae %bh
897 ; X86-NEXT: jb .LBB7_19
898 ; X86-NEXT: # %bb.18: # %entry
899 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
900 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
901 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
902 ; X86-NEXT: .LBB7_19: # %entry
903 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
904 ; X86-NEXT: shrdl %cl, %edi, %esi
905 ; X86-NEXT: sarl %cl, %edi
906 ; X86-NEXT: testb $32, %cl
907 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
908 ; X86-NEXT: je .LBB7_20
909 ; X86-NEXT: # %bb.21: # %entry
910 ; X86-NEXT: testb %bh, %bh
911 ; X86-NEXT: je .LBB7_22
912 ; X86-NEXT: .LBB7_23: # %entry
913 ; X86-NEXT: testb $32, %cl
914 ; X86-NEXT: jne .LBB7_25
915 ; X86-NEXT: .LBB7_24: # %entry
916 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
917 ; X86-NEXT: .LBB7_25: # %entry
918 ; X86-NEXT: movl %ebx, %ecx
919 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
920 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
921 ; X86-NEXT: shldl %cl, %esi, %edi
922 ; X86-NEXT: testb $32, %bl
923 ; X86-NEXT: jne .LBB7_27
924 ; X86-NEXT: # %bb.26: # %entry
925 ; X86-NEXT: movl %edi, %ebp
926 ; X86-NEXT: .LBB7_27: # %entry
927 ; X86-NEXT: testb %bh, %bh
928 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
929 ; X86-NEXT: jne .LBB7_29
930 ; X86-NEXT: # %bb.28:
931 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
932 ; X86-NEXT: orl %ebp, %ebx
933 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
934 ; X86-NEXT: .LBB7_29: # %entry
935 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
936 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
937 ; X86-NEXT: jne .LBB7_31
938 ; X86-NEXT: # %bb.30: # %entry
939 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
940 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
941 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
942 ; X86-NEXT: .LBB7_31: # %entry
943 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
944 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
945 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
946 ; X86-NEXT: shrdl %cl, %ebp, %ebx
947 ; X86-NEXT: testb $32, %cl
948 ; X86-NEXT: jne .LBB7_33
949 ; X86-NEXT: # %bb.32: # %entry
950 ; X86-NEXT: movl %ebx, %esi
951 ; X86-NEXT: .LBB7_33: # %entry
952 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
953 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
954 ; X86-NEXT: je .LBB7_35
955 ; X86-NEXT: # %bb.34:
956 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
957 ; X86-NEXT: orl %ebx, %ecx
958 ; X86-NEXT: movl %ecx, %esi
959 ; X86-NEXT: .LBB7_35: # %entry
960 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
961 ; X86-NEXT: je .LBB7_37
962 ; X86-NEXT: # %bb.36:
963 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
964 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
965 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
966 ; X86-NEXT: .LBB7_37: # %entry
967 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
968 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
969 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
970 ; X86-NEXT: orl %ecx, %edx
971 ; X86-NEXT: je .LBB7_39
972 ; X86-NEXT: # %bb.38: # %entry
973 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
974 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
975 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
976 ; X86-NEXT: .LBB7_39: # %entry
977 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
978 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
979 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
980 ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
981 ; X86-NEXT: orl %edx, %eax
982 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
983 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
984 ; X86-NEXT: je .LBB7_41
985 ; X86-NEXT: # %bb.40: # %entry
986 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
987 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
988 ; X86-NEXT: .LBB7_41: # %entry
989 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
990 ; X86-NEXT: movl %edx, 28(%ecx)
991 ; X86-NEXT: movl %edi, 24(%ecx)
992 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
993 ; X86-NEXT: movl %edx, 12(%ecx)
994 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
995 ; X86-NEXT: movl %edx, 8(%ecx)
996 ; X86-NEXT: movl %esi, 20(%ecx)
997 ; X86-NEXT: movl %eax, 16(%ecx)
998 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
999 ; X86-NEXT: movl %eax, 4(%ecx)
1000 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1001 ; X86-NEXT: movl %eax, (%ecx)
1002 ; X86-NEXT: addl $80, %esp
1003 ; X86-NEXT: popl %esi
1004 ; X86-NEXT: popl %edi
1005 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
1008 ; X86-NEXT: .LBB7_20: # %entry
1009 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1010 ; X86-NEXT: testb %bh, %bh
1011 ; X86-NEXT: jne .LBB7_23
1012 ; X86-NEXT: .LBB7_22:
1013 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1014 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1015 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1016 ; X86-NEXT: testb $32, %cl
1017 ; X86-NEXT: je .LBB7_24
; X86-NEXT: jmp .LBB7_25
;
1020 ; X64-LABEL: test_ashr_v2i128:
1021 ; X64: # %bb.0: # %entry
1022 ; X64-NEXT: movq %rcx, %r11
1023 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
1024 ; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
1025 ; X64-NEXT: movl %r9d, %ecx
1026 ; X64-NEXT: shrdq %cl, %r11, %rdx
1027 ; X64-NEXT: movl %r8d, %ecx
1028 ; X64-NEXT: shrdq %cl, %rsi, %rdi
1029 ; X64-NEXT: movq %rsi, %rax
1030 ; X64-NEXT: sarq %cl, %rax
1031 ; X64-NEXT: sarq $63, %rsi
1032 ; X64-NEXT: testb $64, %r8b
1033 ; X64-NEXT: cmovneq %rax, %rdi
1034 ; X64-NEXT: cmoveq %rax, %rsi
1035 ; X64-NEXT: movq %r11, %rax
1036 ; X64-NEXT: movl %r9d, %ecx
1037 ; X64-NEXT: sarq %cl, %rax
1038 ; X64-NEXT: sarq $63, %r11
1039 ; X64-NEXT: testb $64, %r9b
1040 ; X64-NEXT: cmovneq %rax, %rdx
1041 ; X64-NEXT: cmoveq %rax, %r11
1042 ; X64-NEXT: movq %r11, 24(%r10)
1043 ; X64-NEXT: movq %rdx, 16(%r10)
1044 ; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
1048 %0 = ashr <2 x i128> %x, %a
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
1053 define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
1054 ; X86-LABEL: test_shl_v2i128:
1055 ; X86: # %bb.0: # %entry
1056 ; X86-NEXT: pushl %ebp
1057 ; X86-NEXT: pushl %ebx
1058 ; X86-NEXT: pushl %edi
1059 ; X86-NEXT: pushl %esi
1060 ; X86-NEXT: subl $72, %esp
1061 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1062 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1063 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
1064 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1065 ; X86-NEXT: movl %ebx, %ecx
1066 ; X86-NEXT: shll %cl, %ebp
1067 ; X86-NEXT: movl %eax, %esi
1068 ; X86-NEXT: shll %cl, %esi
1069 ; X86-NEXT: movl %edx, %eax
1070 ; X86-NEXT: subl $64, %eax
1071 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1072 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1073 ; X86-NEXT: sbbl $0, %eax
1074 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1075 ; X86-NEXT: sbbl $0, %eax
1076 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1077 ; X86-NEXT: sbbl $0, %eax
1078 ; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
1079 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1080 ; X86-NEXT: testb $32, %bl
1081 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1082 ; X86-NEXT: movl $0, %eax
1083 ; X86-NEXT: movl $0, %ecx
1084 ; X86-NEXT: jne .LBB8_2
1085 ; X86-NEXT: # %bb.1: # %entry
1086 ; X86-NEXT: movl %esi, %eax
1087 ; X86-NEXT: movl %ebp, %ecx
1088 ; X86-NEXT: .LBB8_2: # %entry
1089 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1090 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1091 ; X86-NEXT: movl %edi, %eax
1092 ; X86-NEXT: movl %ebx, %ecx
1093 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1094 ; X86-NEXT: shldl %cl, %edi, %eax
1095 ; X86-NEXT: testb $32, %bl
1096 ; X86-NEXT: jne .LBB8_4
1097 ; X86-NEXT: # %bb.3: # %entry
1098 ; X86-NEXT: movl %eax, %esi
1099 ; X86-NEXT: .LBB8_4: # %entry
1100 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1101 ; X86-NEXT: movb $64, %cl
1102 ; X86-NEXT: subb %bl, %cl
1103 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1104 ; X86-NEXT: movl %edi, %esi
1105 ; X86-NEXT: shrl %cl, %esi
1106 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1107 ; X86-NEXT: shrdl %cl, %edi, %eax
1108 ; X86-NEXT: testb $32, %cl
1109 ; X86-NEXT: jne .LBB8_5
1110 ; X86-NEXT: # %bb.6: # %entry
1111 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1112 ; X86-NEXT: jmp .LBB8_7
1113 ; X86-NEXT: .LBB8_5:
1114 ; X86-NEXT: movl %esi, %eax
1115 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1116 ; X86-NEXT: .LBB8_7: # %entry
1117 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1118 ; X86-NEXT: movl %ebx, %ecx
1119 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1120 ; X86-NEXT: shldl %cl, %esi, %edi
1121 ; X86-NEXT: testb $32, %bl
1122 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1123 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1124 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1125 ; X86-NEXT: jne .LBB8_9
1126 ; X86-NEXT: # %bb.8: # %entry
1127 ; X86-NEXT: movl %edi, %ebp
1128 ; X86-NEXT: .LBB8_9: # %entry
1129 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1130 ; X86-NEXT: movl %ecx, %ebp
1131 ; X86-NEXT: movl %edx, %ecx
1132 ; X86-NEXT: shll %cl, %ebp
1133 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1134 ; X86-NEXT: movl %ecx, %esi
1135 ; X86-NEXT: movl %edx, %ecx
1136 ; X86-NEXT: shll %cl, %esi
1137 ; X86-NEXT: testb $32, %dl
1138 ; X86-NEXT: movl $0, %edi
1139 ; X86-NEXT: movl $0, %ecx
1140 ; X86-NEXT: jne .LBB8_11
1141 ; X86-NEXT: # %bb.10: # %entry
1142 ; X86-NEXT: movl %esi, %edi
1143 ; X86-NEXT: movl %ebp, %ecx
1144 ; X86-NEXT: .LBB8_11: # %entry
1145 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1146 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1147 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1148 ; X86-NEXT: movl %edx, %ecx
1149 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1150 ; X86-NEXT: shldl %cl, %ebx, %edi
1151 ; X86-NEXT: testb $32, %dl
1152 ; X86-NEXT: jne .LBB8_13
1153 ; X86-NEXT: # %bb.12: # %entry
1154 ; X86-NEXT: movl %edi, %ebp
1155 ; X86-NEXT: .LBB8_13: # %entry
1156 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1157 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1158 ; X86-NEXT: movb $64, %cl
1159 ; X86-NEXT: subb %dl, %cl
1160 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1161 ; X86-NEXT: shrl %cl, %ebx
1162 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1163 ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
1164 ; X86-NEXT: testb $32, %cl
1165 ; X86-NEXT: movl $0, %ecx
1166 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1167 ; X86-NEXT: jne .LBB8_15
1168 ; X86-NEXT: # %bb.14: # %entry
1169 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1170 ; X86-NEXT: .LBB8_15: # %entry
1171 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1172 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1173 ; X86-NEXT: movl %edx, %ecx
1174 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
1175 ; X86-NEXT: shldl %cl, %ebp, %edi
1176 ; X86-NEXT: testb $32, %dl
1177 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
1178 ; X86-NEXT: jne .LBB8_17
1179 ; X86-NEXT: # %bb.16: # %entry
1180 ; X86-NEXT: movl %edi, %esi
1181 ; X86-NEXT: .LBB8_17: # %entry
1182 ; X86-NEXT: orl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1183 ; X86-NEXT: movl %ebx, %eax
1184 ; X86-NEXT: subl $64, %eax
1185 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1186 ; X86-NEXT: sbbl $0, %ecx
1187 ; X86-NEXT: movl %ebp, %ecx
1188 ; X86-NEXT: sbbl $0, %ecx
1189 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1190 ; X86-NEXT: sbbl $0, %ecx
1191 ; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
1192 ; X86-NEXT: jb .LBB8_19
1193 ; X86-NEXT: # %bb.18: # %entry
1194 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1195 ; X86-NEXT: .LBB8_19: # %entry
1196 ; X86-NEXT: jb .LBB8_21
1197 ; X86-NEXT: # %bb.20: # %entry
1198 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1199 ; X86-NEXT: .LBB8_21: # %entry
1200 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
1201 ; X86-NEXT: movl %ebp, %ebx
1202 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1203 ; X86-NEXT: shll %cl, %ebx
1204 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1205 ; X86-NEXT: shldl %cl, %ebp, %edi
1206 ; X86-NEXT: testb $32, %cl
1207 ; X86-NEXT: movl %ebx, %ecx
1208 ; X86-NEXT: jne .LBB8_23
1209 ; X86-NEXT: # %bb.22: # %entry
1210 ; X86-NEXT: movl %edi, %ecx
1211 ; X86-NEXT: .LBB8_23: # %entry
1212 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1213 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1214 ; X86-NEXT: movl %ecx, %edi
1215 ; X86-NEXT: movl %eax, %ecx
1216 ; X86-NEXT: shll %cl, %edi
1217 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1218 ; X86-NEXT: testb $32, %al
1219 ; X86-NEXT: movl $0, %edi
1220 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
1221 ; X86-NEXT: jne .LBB8_25
1222 ; X86-NEXT: # %bb.24: # %entry
1223 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
1224 ; X86-NEXT: .LBB8_25: # %entry
1225 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1226 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
1227 ; X86-NEXT: jne .LBB8_27
1228 ; X86-NEXT: # %bb.26: # %entry
1229 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1230 ; X86-NEXT: .LBB8_27: # %entry
1231 ; X86-NEXT: movl %eax, %ecx
1232 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1233 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1234 ; X86-NEXT: shldl %cl, %edi, %esi
1235 ; X86-NEXT: testb $32, %al
1236 ; X86-NEXT: jne .LBB8_29
1237 ; X86-NEXT: # %bb.28: # %entry
1238 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1239 ; X86-NEXT: .LBB8_29: # %entry
1240 ; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
1241 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1242 ; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
1243 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1244 ; X86-NEXT: jne .LBB8_30
1245 ; X86-NEXT: # %bb.31: # %entry
1246 ; X86-NEXT: testb %al, %al
1247 ; X86-NEXT: je .LBB8_32
1248 ; X86-NEXT: .LBB8_33: # %entry
1249 ; X86-NEXT: testb $32, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
1250 ; X86-NEXT: jne .LBB8_35
1251 ; X86-NEXT: .LBB8_34: # %entry
1252 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1253 ; X86-NEXT: .LBB8_35: # %entry
1254 ; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
1255 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1256 ; X86-NEXT: shrdl %cl, %ebx, %esi
1257 ; X86-NEXT: testb $32, %cl
1258 ; X86-NEXT: jne .LBB8_37
1259 ; X86-NEXT: # %bb.36: # %entry
1260 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1261 ; X86-NEXT: .LBB8_37: # %entry
1262 ; X86-NEXT: testb %al, %al
1263 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1264 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1265 ; X86-NEXT: jne .LBB8_38
1266 ; X86-NEXT: # %bb.39: # %entry
1267 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1268 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
1269 ; X86-NEXT: testb %al, %al
1270 ; X86-NEXT: jne .LBB8_41
1271 ; X86-NEXT: jmp .LBB8_42
1272 ; X86-NEXT: .LBB8_30:
1273 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1274 ; X86-NEXT: orl %ebp, %ecx
1275 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1276 ; X86-NEXT: testb %al, %al
1277 ; X86-NEXT: jne .LBB8_33
1278 ; X86-NEXT: .LBB8_32: # %entry
1279 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1280 ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
1281 ; X86-NEXT: testb $32, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
1282 ; X86-NEXT: je .LBB8_34
1283 ; X86-NEXT: jmp .LBB8_35
1284 ; X86-NEXT: .LBB8_38:
1285 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
1286 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
1287 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1288 ; X86-NEXT: testb %al, %al
1289 ; X86-NEXT: je .LBB8_42
1290 ; X86-NEXT: .LBB8_41:
1291 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1292 ; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1293 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1294 ; X86-NEXT: .LBB8_42: # %entry
1295 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1296 ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
1297 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
1298 ; X86-NEXT: orl %eax, %edx
1299 ; X86-NEXT: je .LBB8_44
1300 ; X86-NEXT: # %bb.43: # %entry
1301 ; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1302 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1303 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1304 ; X86-NEXT: .LBB8_44: # %entry
1305 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1306 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1307 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
1308 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ebx
1309 ; X86-NEXT: orl %edx, %ebx
1310 ; X86-NEXT: je .LBB8_46
1311 ; X86-NEXT: # %bb.45: # %entry
1312 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1313 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
1314 ; X86-NEXT: .LBB8_46: # %entry
1315 ; X86-NEXT: movl %esi, 20(%eax)
1316 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1317 ; X86-NEXT: movl %edx, 16(%eax)
1318 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1319 ; X86-NEXT: movl %edx, 4(%eax)
1320 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1321 ; X86-NEXT: movl %edx, (%eax)
1322 ; X86-NEXT: movl %edi, 28(%eax)
1323 ; X86-NEXT: movl %ecx, 24(%eax)
1324 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1325 ; X86-NEXT: movl %ecx, 12(%eax)
1326 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1327 ; X86-NEXT: movl %ecx, 8(%eax)
1328 ; X86-NEXT: addl $72, %esp
1329 ; X86-NEXT: popl %esi
1330 ; X86-NEXT: popl %edi
1331 ; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
1335 ; X64-LABEL: test_shl_v2i128:
1336 ; X64: # %bb.0: # %entry
1337 ; X64-NEXT: movq %rcx, %rax
1338 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
1339 ; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
1340 ; X64-NEXT: movl %r9d, %ecx
1341 ; X64-NEXT: shldq %cl, %rdx, %rax
1342 ; X64-NEXT: movl %r8d, %ecx
1343 ; X64-NEXT: shldq %cl, %rdi, %rsi
1344 ; X64-NEXT: shlq %cl, %rdi
1345 ; X64-NEXT: xorl %r11d, %r11d
1346 ; X64-NEXT: testb $64, %r8b
1347 ; X64-NEXT: cmovneq %rdi, %rsi
1348 ; X64-NEXT: cmovneq %r11, %rdi
1349 ; X64-NEXT: movl %r9d, %ecx
1350 ; X64-NEXT: shlq %cl, %rdx
1351 ; X64-NEXT: testb $64, %r9b
1352 ; X64-NEXT: cmovneq %rdx, %rax
1353 ; X64-NEXT: cmovneq %r11, %rdx
1354 ; X64-NEXT: movq %rax, 24(%r10)
1355 ; X64-NEXT: movq %rdx, 16(%r10)
1356 ; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
1360 %0 = shl <2 x i128> %x, %a
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
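; As in the scalar case, out-of-range vector shift amounts fold away and only a
; return is expected.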
1365 define void @test_lshr_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1366 ; CHECK-LABEL: test_lshr_v2i128_outofrange:
1367 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
1370 %0 = lshr <2 x i128> %x, <i128 -1, i128 -1>
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
1375 define void @test_ashr_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1376 ; CHECK-LABEL: test_ashr_v2i128_outofrange:
1377 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
1380 %0 = ashr <2 x i128> %x, <i128 -1, i128 -1>
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
1385 define void @test_shl_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1386 ; CHECK-LABEL: test_shl_v2i128_outofrange:
1387 ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
1390 %0 = shl <2 x i128> %x, <i128 -1, i128 -1>
store <2 x i128> %0, <2 x i128>* %r, align 16
ret void
}
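; The *_outofrange_sum variants apply a second, in-range shift to an
; out-of-range result; the expected output just stores zero for both lanes.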
1395 define void @test_lshr_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1396 ; X86-LABEL: test_lshr_v2i128_outofrange_sum:
1397 ; X86: # %bb.0: # %entry
1398 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1399 ; X86-NEXT: movl $0, 28(%eax)
1400 ; X86-NEXT: movl $0, 24(%eax)
1401 ; X86-NEXT: movl $0, 20(%eax)
1402 ; X86-NEXT: movl $0, 16(%eax)
1403 ; X86-NEXT: movl $0, 12(%eax)
1404 ; X86-NEXT: movl $0, 8(%eax)
1405 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
1409 ; X64-LABEL: test_lshr_v2i128_outofrange_sum:
1410 ; X64: # %bb.0: # %entry
1411 ; X64-NEXT: xorps %xmm0, %xmm0
1412 ; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
1416 %0 = lshr <2 x i128> %x, <i128 -1, i128 -1>
1417 %1 = lshr <2 x i128> %0, <i128 1, i128 1>
store <2 x i128> %1, <2 x i128>* %r, align 16
ret void
}
1422 define void @test_ashr_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1423 ; X86-LABEL: test_ashr_v2i128_outofrange_sum:
1424 ; X86: # %bb.0: # %entry
1425 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1426 ; X86-NEXT: movl $0, 28(%eax)
1427 ; X86-NEXT: movl $0, 24(%eax)
1428 ; X86-NEXT: movl $0, 20(%eax)
1429 ; X86-NEXT: movl $0, 16(%eax)
1430 ; X86-NEXT: movl $0, 12(%eax)
1431 ; X86-NEXT: movl $0, 8(%eax)
1432 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
1436 ; X64-LABEL: test_ashr_v2i128_outofrange_sum:
1437 ; X64: # %bb.0: # %entry
1438 ; X64-NEXT: xorps %xmm0, %xmm0
1439 ; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
1443 %0 = ashr <2 x i128> %x, <i128 -1, i128 -1>
1444 %1 = ashr <2 x i128> %0, <i128 1, i128 1>
store <2 x i128> %1, <2 x i128>* %r, align 16
ret void
}
1449 define void @test_shl_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
1450 ; X86-LABEL: test_shl_v2i128_outofrange_sum:
1451 ; X86: # %bb.0: # %entry
1452 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1453 ; X86-NEXT: movl $0, 28(%eax)
1454 ; X86-NEXT: movl $0, 24(%eax)
1455 ; X86-NEXT: movl $0, 20(%eax)
1456 ; X86-NEXT: movl $0, 16(%eax)
1457 ; X86-NEXT: movl $0, 12(%eax)
1458 ; X86-NEXT: movl $0, 8(%eax)
1459 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
1463 ; X64-LABEL: test_shl_v2i128_outofrange_sum:
1464 ; X64: # %bb.0: # %entry
1465 ; X64-NEXT: xorps %xmm0, %xmm0
1466 ; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
1470 %0 = shl <2 x i128> %x, <i128 -1, i128 -1>
1471 %1 = shl <2 x i128> %0, <i128 1, i128 1>
store <2 x i128> %1, <2 x i128>* %r, align 16
ret void
}
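; Extending an out-of-range shift result and shifting the wider type again
; should also fold; the expected output is an all-zero store of the <2 x i256>.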
1480 define <2 x i256> @shl_sext_shl_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_sext_shl_outofrange:
; X86: # %bb.0:
1483 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1484 ; X86-NEXT: movl $0, 60(%eax)
1485 ; X86-NEXT: movl $0, 56(%eax)
1486 ; X86-NEXT: movl $0, 52(%eax)
1487 ; X86-NEXT: movl $0, 48(%eax)
1488 ; X86-NEXT: movl $0, 44(%eax)
1489 ; X86-NEXT: movl $0, 40(%eax)
1490 ; X86-NEXT: movl $0, 36(%eax)
1491 ; X86-NEXT: movl $0, 32(%eax)
1492 ; X86-NEXT: movl $0, 28(%eax)
1493 ; X86-NEXT: movl $0, 24(%eax)
1494 ; X86-NEXT: movl $0, 20(%eax)
1495 ; X86-NEXT: movl $0, 16(%eax)
1496 ; X86-NEXT: movl $0, 12(%eax)
1497 ; X86-NEXT: movl $0, 8(%eax)
1498 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_sext_shl_outofrange:
; X64: # %bb.0:
1504 ; X64-NEXT: movq %rdi, %rax
1505 ; X64-NEXT: xorps %xmm0, %xmm0
1506 ; X64-NEXT: movaps %xmm0, 48(%rdi)
1507 ; X64-NEXT: movaps %xmm0, 32(%rdi)
1508 ; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
1511 %1 = shl <2 x i128> %a0, <i128 -1, i128 -1>
1512 %2 = sext <2 x i128> %1 to <2 x i256>
%3 = shl <2 x i256> %2, <i256 128, i256 128>
ret <2 x i256> %3
}
1517 define <2 x i256> @shl_zext_shl_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_zext_shl_outofrange:
; X86: # %bb.0:
1520 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1521 ; X86-NEXT: movl $0, 60(%eax)
1522 ; X86-NEXT: movl $0, 56(%eax)
1523 ; X86-NEXT: movl $0, 52(%eax)
1524 ; X86-NEXT: movl $0, 48(%eax)
1525 ; X86-NEXT: movl $0, 44(%eax)
1526 ; X86-NEXT: movl $0, 40(%eax)
1527 ; X86-NEXT: movl $0, 36(%eax)
1528 ; X86-NEXT: movl $0, 32(%eax)
1529 ; X86-NEXT: movl $0, 28(%eax)
1530 ; X86-NEXT: movl $0, 24(%eax)
1531 ; X86-NEXT: movl $0, 20(%eax)
1532 ; X86-NEXT: movl $0, 16(%eax)
1533 ; X86-NEXT: movl $0, 12(%eax)
1534 ; X86-NEXT: movl $0, 8(%eax)
1535 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_zext_shl_outofrange:
; X64: # %bb.0:
1541 ; X64-NEXT: movq %rdi, %rax
1542 ; X64-NEXT: xorps %xmm0, %xmm0
1543 ; X64-NEXT: movaps %xmm0, 48(%rdi)
1544 ; X64-NEXT: movaps %xmm0, 32(%rdi)
1545 ; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
1548 %1 = shl <2 x i128> %a0, <i128 -1, i128 -1>
1549 %2 = zext <2 x i128> %1 to <2 x i256>
%3 = shl <2 x i256> %2, <i256 128, i256 128>
ret <2 x i256> %3
}
1554 define <2 x i256> @shl_zext_lshr_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_zext_lshr_outofrange:
; X86: # %bb.0:
1557 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1558 ; X86-NEXT: movl $0, 60(%eax)
1559 ; X86-NEXT: movl $0, 56(%eax)
1560 ; X86-NEXT: movl $0, 52(%eax)
1561 ; X86-NEXT: movl $0, 48(%eax)
1562 ; X86-NEXT: movl $0, 44(%eax)
1563 ; X86-NEXT: movl $0, 40(%eax)
1564 ; X86-NEXT: movl $0, 36(%eax)
1565 ; X86-NEXT: movl $0, 32(%eax)
1566 ; X86-NEXT: movl $0, 28(%eax)
1567 ; X86-NEXT: movl $0, 24(%eax)
1568 ; X86-NEXT: movl $0, 20(%eax)
1569 ; X86-NEXT: movl $0, 16(%eax)
1570 ; X86-NEXT: movl $0, 12(%eax)
1571 ; X86-NEXT: movl $0, 8(%eax)
1572 ; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_zext_lshr_outofrange:
; X64: # %bb.0:
1578 ; X64-NEXT: movq %rdi, %rax
1579 ; X64-NEXT: xorps %xmm0, %xmm0
1580 ; X64-NEXT: movaps %xmm0, 48(%rdi)
1581 ; X64-NEXT: movaps %xmm0, 32(%rdi)
1582 ; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
1585 %1 = lshr <2 x i128> %a0, <i128 -1, i128 -1>
1586 %2 = zext <2 x i128> %1 to <2 x i256>
%3 = shl <2 x i256> %2, <i256 128, i256 128>
ret <2 x i256> %3
}
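; shl by 1 followed by lshr by 1 only clears the top bit, so this should lower
; to a mask of the most significant word (0x7FFFFFFF / 0x7FFFFFFFFFFFFFFF).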
1591 define i128 @lshr_shl_mask(i128 %a0) {
; X86-LABEL: lshr_shl_mask:
; X86: # %bb.0:
1594 ; X86-NEXT: pushl %edi
1595 ; X86-NEXT: .cfi_def_cfa_offset 8
1596 ; X86-NEXT: pushl %esi
1597 ; X86-NEXT: .cfi_def_cfa_offset 12
1598 ; X86-NEXT: .cfi_offset %esi, -12
1599 ; X86-NEXT: .cfi_offset %edi, -8
1600 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1601 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1602 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1603 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1604 ; X86-NEXT: movl $2147483647, %edi # imm = 0x7FFFFFFF
1605 ; X86-NEXT: andl {{[0-9]+}}(%esp), %edi
1606 ; X86-NEXT: movl %edi, 12(%eax)
1607 ; X86-NEXT: movl %esi, 8(%eax)
1608 ; X86-NEXT: movl %edx, 4(%eax)
1609 ; X86-NEXT: movl %ecx, (%eax)
1610 ; X86-NEXT: popl %esi
1611 ; X86-NEXT: .cfi_def_cfa_offset 8
1612 ; X86-NEXT: popl %edi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl $4
;
; X64-LABEL: lshr_shl_mask:
; X64: # %bb.0:
1618 ; X64-NEXT: movq %rdi, %rax
1619 ; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
; X64-NEXT: andq %rsi, %rdx
; X64-NEXT: retq
1622 %1 = shl i128 %a0, 1
1623 %2 = lshr i128 %1, 1