; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s

;==============================================================================;
; the shift amount is negated (shiftbitwidth - shiftamt)
;==============================================================================;
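
; AArch64's variable-shift instructions (LSLV, LSRV, ASRV) use only the shift
; amount modulo the bit width, so "bitwidth - shamt" is congruent to "-shamt"
; and the subtraction from the bit width should fold to a single neg.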

; shift left
;------------------------------------------------------------------------------;

define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsl w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_shl_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    lsl w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, i32* %dstptr
  ret void
}

define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = shl i32 %val, %negshamt
  store i32 %shifted, i32* %valptr
  ret void
}

define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsl x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_shl_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    lsl x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, i64* %dstptr
  ret void
}

define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_shl_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = shl i64 %val, %negshamt
  store i64 %shifted, i64* %valptr
  ret void
}

; logical shift right
;------------------------------------------------------------------------------;

define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_lshr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    lsr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, i32* %dstptr
  ret void
}

define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = lshr i32 %val, %negshamt
  store i32 %shifted, i32* %valptr
  ret void
}

define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_lshr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    lsr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, i64* %dstptr
  ret void
}

define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_lshr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = lshr i64 %val, %negshamt
  store i64 %shifted, i64* %valptr
  ret void
}

; arithmetic shift right
;------------------------------------------------------------------------------;

define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    asr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define i32 @load32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    asr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  ret i32 %shifted
}

define void @store32_ashr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    asr w8, w0, w8
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, i32* %dstptr
  ret void
}

define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    asr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %val = load i32, i32* %valptr
  %negshamt = sub i32 32, %shamt
  %shifted = ashr i32 %val, %negshamt
  store i32 %shifted, i32* %valptr
  ret void
}

define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    asr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define i64 @load64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    asr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  ret i64 %shifted
}

define void @store64_ashr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    asr x8, x0, x8
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, i64* %dstptr
  ret void
}

define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_ashr_by_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    asr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %val = load i64, i64* %valptr
  %negshamt = sub i64 64, %shamt
  %shifted = ashr i64 %val, %negshamt
  store i64 %shifted, i64* %valptr
  ret void
}

;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
; next, let's test only the simple reg pattern, and only lshr.
;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;

;==============================================================================;
; subtraction from negated shift amount
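; (bitwidth - a) - b == -(a + b) (mod bitwidth), so a neg of an add is
; expected.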

define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_from_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = sub i32 %nega, %b
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_from_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = sub i64 %nega, %b
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; subtraction of negated shift amount
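; b - (bitwidth - a) == (a + b) - bitwidth == a + b (mod bitwidth), so a
; plain add is expected.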

define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_of_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = sub i32 %b, %nega
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_of_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = sub i64 %b, %nega
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; add to negated shift amount
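; (bitwidth - a) + b == b - a (mod bitwidth), so a single sub is expected.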

define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_add_to_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negasubb = add i32 %nega, %b
  %shifted = lshr i32 %val, %negasubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_add_to_negated:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negasubb = add i64 %nega, %b
  %shifted = lshr i64 %val, %negasubb
  ret i64 %shifted
}

;==============================================================================;
; subtraction of negated shift amounts
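; (bitwidth - a) - (bitwidth - b) == b - a exactly (no modular reasoning
; needed), so a single sub is expected.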

define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_sub_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negb = sub i32 32, %b
  %negasubnegb = sub i32 %nega, %negb
  %shifted = lshr i32 %val, %negasubnegb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_sub_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negb = sub i64 64, %b
  %negasubnegb = sub i64 %nega, %negb
  %shifted = lshr i64 %val, %negasubnegb
  ret i64 %shifted
}

;==============================================================================;
; addition of negated shift amounts
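; (bitwidth - a) + (bitwidth - b) == -(a + b) (mod bitwidth), so a neg of an
; add is expected.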

define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_add_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 32, %a
  %negb = sub i32 32, %b
  %negasubnegb = add i32 %nega, %negb
  %shifted = lshr i32 %val, %negasubnegb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_add_of_negated_amts:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 64, %a
  %negb = sub i64 64, %b
  %negasubnegb = add i64 %nega, %negb
  %shifted = lshr i64 %val, %negasubnegb
  ret i64 %shifted
}

;==============================================================================;
; and patterns with an actual negation+addition
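; -a + bitwidth == -a (mod bitwidth), so the explicit add of the bit width is
; congruent to zero and should vanish in all of the patterns below.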

define i32 @reg32_lshr_by_negated_unfolded(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 0, %shamt
  %negaaddbitwidth = add i32 %negshamt, 32
  %shifted = lshr i32 %val, %negaaddbitwidth
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 0, %shamt
  %negaaddbitwidth = add i64 %negshamt, 64
  %shifted = lshr i64 %val, %negaaddbitwidth
  ret i64 %shifted
}

define i32 @reg32_lshr_by_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w1, w2
; CHECK-NEXT:    neg w8, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthsubb = sub i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x1, x2
; CHECK-NEXT:    neg x8, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthsubb = sub i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthsubb = sub i32 %b, %negaaddbitwidth
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthsubb = sub i64 %b, %negaaddbitwidth
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub w8, w2, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = add i32 %nega, 32
  %negaaddbitwidthaddb = add i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthaddb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x2, x1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = add i64 %nega, 64
  %negaaddbitwidthaddb = add i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthaddb
  ret i64 %shifted
}

;==============================================================================;
; and patterns with an actual negation+mask
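; (-a) & (bitwidth-1) is exactly (-a) mod bitwidth, so when the masked value
; is itself the shift amount the mask should fold away, leaving a bare neg;
; when it first feeds further add/sub arithmetic, the checks below show the
; mask being kept.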

define i32 @reg32_lshr_by_masked_negated_unfolded(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %negshamt = sub i32 0, %shamt
  %negaaddbitwidth = and i32 %negshamt, 31
  %shifted = lshr i32 %val, %negaaddbitwidth
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %negshamt = sub i64 0, %shamt
  %negaaddbitwidth = and i64 %negshamt, 63
  %shifted = lshr i64 %val, %negaaddbitwidth
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    sub w8, w8, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthsubb = sub i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded_sub_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    sub x8, x8, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthsubb = sub i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    sub w8, w2, w8
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthsubb = sub i32 %b, %negaaddbitwidth
  %shifted = lshr i32 %val, %negaaddbitwidthsubb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_b_sub_negated_unfolded:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    sub x8, x2, x8
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthsubb = sub i64 %b, %negaaddbitwidth
  %shifted = lshr i64 %val, %negaaddbitwidthsubb
  ret i64 %shifted
}

define i32 @reg32_lshr_by_masked_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: reg32_lshr_by_masked_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and w8, w8, #0x1f
; CHECK-NEXT:    add w8, w8, w2
; CHECK-NEXT:    lsr w0, w0, w8
; CHECK-NEXT:    ret
  %nega = sub i32 0, %a
  %negaaddbitwidth = and i32 %nega, 31
  %negaaddbitwidthaddb = add i32 %negaaddbitwidth, %b
  %shifted = lshr i32 %val, %negaaddbitwidthaddb
  ret i32 %shifted
}

define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: reg64_lshr_by_masked_negated_unfolded_add_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    and x8, x8, #0x3f
; CHECK-NEXT:    add x8, x8, x2
; CHECK-NEXT:    lsr x0, x0, x8
; CHECK-NEXT:    ret
  %nega = sub i64 0, %a
  %negaaddbitwidth = and i64 %nega, 63
  %negaaddbitwidthaddb = add i64 %negaaddbitwidth, %b
  %shifted = lshr i64 %val, %negaaddbitwidthaddb
  ret i64 %shifted
}