; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; *Please* keep in sync with test/CodeGen/X86/extract-lowbits.ll

; https://bugs.llvm.org/show_bug.cgi?id=36419
; https://bugs.llvm.org/show_bug.cgi?id=37603
; https://bugs.llvm.org/show_bug.cgi?id=37610
; Patterns:
;   a) x &  (1 << nbits) - 1
;   b) x & ~(-1 << nbits)
;   c) x &  (-1 >> (32 - y))
;   d) x << (32 - y) >> (32 - y)
; are equivalent.
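;
; As a worked illustration (added here for clarity, not part of the
; autogenerated checks): with x = 0xDEADBEEF and nbits = 8,
;   a) 0xDEADBEEF &  ((1 << 8) - 1)   = 0xDEADBEEF & 0xff = 0xef
;   b) 0xDEADBEEF & ~(-1 << 8)        = 0xDEADBEEF & 0xff = 0xef
;   c) 0xDEADBEEF &  (-1 >> (32 - 8)) = 0xDEADBEEF & 0xff = 0xef
;   d) (0xDEADBEEF << 24) >> 24       = 0xef000000 >> 24  = 0xef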
; ---------------------------------------------------------------------------- ;
; Pattern a. 32-bit
; ---------------------------------------------------------------------------- ;
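; NOTE: AArch64 has no direct BZHI-like bit-extract instruction, so pattern (a)
; is lowered by materializing the mask explicitly: mov #1, shift left by nbits,
; subtract 1, then AND.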
define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w0, w8
; CHECK-NEXT:    ret
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}
; 64-bit.

define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x0, x8
; CHECK-NEXT:    ret
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}
; ---------------------------------------------------------------------------- ;
; Pattern b. 32-bit
; ---------------------------------------------------------------------------- ;
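; NOTE: the ~(-1 << nbits) form maps directly onto BIC (AND with inverted
; operand), so the xor folds away and no separate mask inversion is emitted.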
define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}
; 64-bit.

define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}
; ---------------------------------------------------------------------------- ;
; Pattern c. 32-bit
; ---------------------------------------------------------------------------- ;
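; NOTE: when the shift amount is native-width (i32/i64), the (32 - y) or
; (64 - y) computation lowers to a plain NEG: AArch64 shift instructions take
; the amount modulo the register width, and 32 (resp. 64) is congruent to 0.
; The index-zext variants subtract in i8 first, so the constant must be
; materialized instead. The same holds for pattern d below.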
define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w0, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}
; 64-bit.

define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #64
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x0, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}
; ---------------------------------------------------------------------------- ;
; Pattern d. 32-bit
; ---------------------------------------------------------------------------- ;
define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsl w9, w0, w8
; CHECK-NEXT:    lsr w0, w9, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %val, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    lsl w9, w0, w8
; CHECK-NEXT:    lsr w0, w9, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %val, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %val, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %val, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}
; 64-bit.

define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsl x9, x0, x8
; CHECK-NEXT:    lsr x0, x9, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %val, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #64
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    lsl x9, x0, x8
; CHECK-NEXT:    lsr x0, x9, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %val, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %val, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %val, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}
; ---------------------------------------------------------------------------- ;
; Constant mask
; ---------------------------------------------------------------------------- ;
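; NOTE: every constant below is a contiguous run of low bits, which is
; representable as an AArch64 logical immediate, so each case folds to a
; single AND with an immediate (plus a load where applicable).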
; 32-bit

define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7fffffff
; CHECK-NEXT:    ret
  %masked = and i32 %val, 2147483647
  ret i32 %masked
}

define i32 @bzhi32_constant_mask32_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask32_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7fffffff
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 2147483647
  ret i32 %masked
}

define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7fff
; CHECK-NEXT:    ret
  %masked = and i32 %val, 32767
  ret i32 %masked
}

define i32 @bzhi32_constant_mask16_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask16_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7fff
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 32767
  ret i32 %masked
}

define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7f
; CHECK-NEXT:    ret
  %masked = and i32 %val, 127
  ret i32 %masked
}

define i32 @bzhi32_constant_mask8_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask8_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7f
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 127
  ret i32 %masked
}
; 64-bit

define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x3fffffffffffffff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 4611686018427387903
  ret i64 %masked
}

define i64 @bzhi64_constant_mask64_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask64_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x3fffffffffffffff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 4611686018427387903
  ret i64 %masked
}

define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7fffffff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 2147483647
  ret i64 %masked
}

define i64 @bzhi64_constant_mask32_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask32_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7fffffff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 2147483647
  ret i64 %masked
}

define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7fff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 32767
  ret i64 %masked
}

define i64 @bzhi64_constant_mask16_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask16_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7fff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 32767
  ret i64 %masked
}

define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7f
; CHECK-NEXT:    ret
  %masked = and i64 %val, 127
  ret i64 %masked
}

define i64 @bzhi64_constant_mask8_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask8_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7f
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 127
  ret i64 %masked
}