; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s

;==============================================================================;
; the shift amount is negated (shiftbitwidth - shiftamt)
;==============================================================================;
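
; Note on these folds: AArch64 variable shifts (lslv/lsrv/asrv) use the shift
; amount modulo the register width, so "bitwidth - shamt" is congruent to
; "-shamt" and the subtraction can lower to a plain neg feeding the shift:
;   x << (32 - n)  ==  x << ((0 - n) & 31)  ->  neg w8, w1 ; lsl w0, w0, w8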

; shift left
;------------------------------------------------------------------------------;

define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsl w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_shl_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    lsl w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_shl_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_shl_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsl x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_shl_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    lsl x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_shl_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_shl_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

; logical shift right
;------------------------------------------------------------------------------;

define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_lshr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    lsr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_lshr_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_lshr_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_lshr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    lsr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_lshr_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_lshr_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsr x8, x8, x9
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

; arithmetic shift right
;------------------------------------------------------------------------------;

define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    asr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    asr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_ashr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    asr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    asr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_ashr_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_ashr_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    asr w8, w8, w9
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    asr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    asr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_ashr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    asr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    asr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_ashr_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_ashr_by_negated_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    asr x8, x8, x9
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

;==============================================================================;
; the shift amount is complemented (shiftbitwidth - 1 - shiftamt)
;==============================================================================;
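
; Here "bitwidth - 1 - shamt" is congruent to "~shamt" modulo the register
; width (since -x - 1 == ~x), so the subtraction can lower to a single mvn
; instead of a mov+sub pair.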

; shift left
;------------------------------------------------------------------------------;

define i32 @reg32_shl_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w1
; CHECK-NEXT:    lsl w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_shl_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsl w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_shl_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w2
; CHECK-NEXT:    lsl w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_shl_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_shl_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_shl_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    mov w9, #31 // =0x1f
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x1
; CHECK-NEXT:    lsl x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_shl_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsl x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_shl_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x2
; CHECK-NEXT:    lsl x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_shl_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_shl_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_shl_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_shl_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    mov w9, #63 // =0x3f
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

; logical shift right
;------------------------------------------------------------------------------;

define i32 @reg32_lshr_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_lshr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_lshr_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w2
; CHECK-NEXT:    lsr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_lshr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_lshr_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_lshr_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    mov w9, #31 // =0x1f
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_lshr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_lshr_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x2
; CHECK-NEXT:    lsr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_lshr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_lshr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_lshr_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_lshr_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    lsr x8, x8, x9
; CHECK-NEXT:    mov w9, #63 // =0x3f
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

; arithmetic shift right
;------------------------------------------------------------------------------;

define i32 @reg32_ashr_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w1
; CHECK-NEXT:    asr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_ashr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    asr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_ashr_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn w8, w2
; CHECK-NEXT:    asr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 31, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %dstptr
  ret void
}

define void @modify32_ashr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    asr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  ret void
}

define void @modify32_ashr_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify32_ashr_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mvn w9, w1
; CHECK-NEXT:    asr w8, w8, w9
; CHECK-NEXT:    mov w9, #31 // =0x1f
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    str w9, [x2]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valptr
  %negshamt = sub i32 31, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, ptr %valptr
  store i32 %negshamt, ptr %shamtptr
  ret void
}

define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x1
; CHECK-NEXT:    asr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_ashr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    asr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_ashr_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x2
; CHECK-NEXT:    asr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 63, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %dstptr
  ret void
}

define void @modify64_ashr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_ashr_by_complemented:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    asr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  ret void
}

define void @modify64_ashr_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
; CHECK-LABEL: modify64_ashr_by_complemented_multi_use:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mvn x9, x1
; CHECK-NEXT:    asr x8, x8, x9
; CHECK-NEXT:    mov w9, #63 // =0x3f
; CHECK-NEXT:    sub x9, x9, x1
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    str x9, [x2]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valptr
  %negshamt = sub i64 63, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, ptr %valptr
  store i64 %negshamt, ptr %shamtptr
  ret void
}

;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
; next, let's test only the simple reg pattern, and only lshr.
;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;

;==============================================================================;
; subtraction from negated shift amount
;==============================================================================;
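
; (bitwidth - a) - b is congruent to -(a + b) modulo the bitwidth, so this
; should lower to an add followed by a neg.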

define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_from_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = sub i32 %nega, %b
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_from_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = sub i64 %nega, %b
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; subtraction of negated shift amount
;==============================================================================;
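
; b - (bitwidth - a) is congruent to a + b modulo the bitwidth, so the
; negation should disappear entirely and leave a single add.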

define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_of_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = sub i32 %b, %nega
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_of_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = sub i64 %b, %nega
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; add to negated shift amount
;==============================================================================;
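
; (bitwidth - a) + b is congruent to b - a modulo the bitwidth, so this
; should lower to a single sub.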

define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_add_to_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = add i32 %nega, %b
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_add_to_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = add i64 %nega, %b
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; subtraction of negated shift amounts
;==============================================================================;
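
; (bitwidth - a) - (bitwidth - b) is exactly b - a, so both negations should
; cancel and leave a single sub.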

define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negb = sub i32 32, %b
  %negasubnegb = sub i32 %nega, %negb
  %shifted = lshr i32 %val, %negasubnegb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negb = sub i64 64, %b
  %negasubnegb = sub i64 %nega, %negb
  %shifted = lshr i64 %val, %negasubnegb
  ret i64 %shifted
}

;==============================================================================;
; addition of negated shift amounts
;==============================================================================;
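
; (bitwidth - a) + (bitwidth - b) is congruent to -(a + b) modulo the
; bitwidth, so this should lower to add + neg.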

define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_add_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negb = sub i32 32, %b
  %negasubnegb = add i32 %nega, %negb
  %shifted = lshr i32 %val, %negasubnegb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_add_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negb = sub i64 64, %b
  %negasubnegb = add i64 %nega, %negb
  %shifted = lshr i64 %val, %negasubnegb
  ret i64 %shifted
}

;==============================================================================;
; and patterns with an actual negation+addition
;==============================================================================;
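
; -a + bitwidth is congruent to -a modulo the bitwidth, so adding the
; bitwidth to an already-negated amount should fold away.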

define i32 @reg32_lshr_by_negated_unfolded(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 0, %shamt
  %negaaddbitwidth = add i32 %negshamt, 32
  %shifted = lshr i32 %val, %negaaddbitwidth
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 0, %shamt
  %negaaddbitwidth = add i64 %negshamt, 64
  %shifted = lshr i64 %val, %negaaddbitwidth
  ret i64 %shifted
}

define i32 @reg32_lshr_by_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthsubb = sub i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthsubb = sub i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthsubb = sub i32 %b, %negaaddbitwidth
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthsubb = sub i64 %b, %negaaddbitwidth
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthaddb = add i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthaddb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthaddb = add i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthaddb
  ret i64 %shifted
}

;==============================================================================;
; and patterns with an actual negation+mask
;==============================================================================;
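
; Masking with bitwidth-1 matches the modulo the hardware applies anyway, so
; the and should fold when it feeds the shift directly; it has to survive when
; other arithmetic consumes the masked value (the *_sub_b/_add_b cases below).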

define i32 @reg32_lshr_by_masked_negated_unfolded(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 0, %shamt
  %negaaddbitwidth = and i32 %negshamt, 31
  %shifted = lshr i32 %val, %negaaddbitwidth
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 0, %shamt
  %negaaddbitwidth = and i64 %negshamt, 63
  %shifted = lshr i64 %val, %negaaddbitwidth
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    sub w8, w8, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthsubb = sub i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    sub x8, x8, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthsubb = sub i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    sub w8, w2, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthsubb = sub i32 %b, %negaaddbitwidth
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    sub x8, x2, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthsubb = sub i64 %b, %negaaddbitwidth
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    add w8, w8, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthaddb = add i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthaddb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    add x8, x8, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthaddb = add i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthaddb
  ret i64 %shifted
}

define i32 @t(i64 %x) {
; CHECK-LABEL: t:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x0, x0, #17, #28
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
  %s = lshr i64 %x, 13
  %t = trunc i64 %s to i32