; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll

; https://bugs.llvm.org/show_bug.cgi?id=36419
; https://bugs.llvm.org/show_bug.cgi?id=37603
; https://bugs.llvm.org/show_bug.cgi?id=37610

; Patterns:
;   a) (x >> start) &  (1 << nbits) - 1
;   b) (x >> start) & ~(-1 << nbits)
;   c) (x >> start) &  (-1 >> (32 - y))
;   d) (x >> start) << (32 - y) >> (32 - y)
; are equivalent.
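
; Worked example (illustrative only, not part of the checked output):
; for x = 0xABCD, start = 4, nbits = 8, all four patterns extract the
; same 8-bit field:
;   a) (0xABCD >> 4) & ((1 << 8) - 1)   = 0xABC & 0xFF = 0xBC
;   b) (0xABCD >> 4) & ~(-1 << 8)       = 0xABC & 0xFF = 0xBC
;   c) (0xABCD >> 4) & (-1 >> (32 - 8)) = 0xABC & 0xFF = 0xBC
;   d) ((0xABC << 24) >> 24, logical)   = 0xBC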

; ---------------------------------------------------------------------------- ;
; Pattern a. 32-bit
; ---------------------------------------------------------------------------- ;

define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

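; Explanatory note (an observation about the checks above, not itself
; checked): AArch64's ubfx takes only immediate lsb/width operands, so
; with a variable %numlowbits the mask (1 << nbits) - 1 has to be
; materialized with mov+lsl+sub before the and.
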
define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0_arithmetic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    asr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = ashr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1 // =0x1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    sub w9, w9, #1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1 // =0x1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    sub w9, w9, #1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit.

define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    sub x8, x8, #1
; CHECK-NEXT:    and x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0_arithmetic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    asr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    sub x8, x8, #1
; CHECK-NEXT:    and x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = ashr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    sub x8, x8, #1
; CHECK-NEXT:    and x0, x8, x9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1 // =0x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    sub x9, x9, #1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1 // =0x1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    sub x9, x9, #1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    sub x8, x8, #1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  %res = trunc i64 %masked to i32
  ret i32 %res
}

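; Observation (not itself checked): since only the low 32 bits survive
; the trunc, the sub and the and above are already narrowed to
; w-registers; only the shifts that feed them stay 64-bit.
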
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %truncshifted
  ret i32 %masked
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    sub w8, w8, #1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %zextmask = zext i32 %mask to i64
  %masked = and i64 %zextmask, %shifted
  %truncmasked = trunc i64 %masked to i32
  ret i32 %truncmasked
}

; ---------------------------------------------------------------------------- ;
; Pattern b. 32-bit
; ---------------------------------------------------------------------------- ;

define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w9, w0, w1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit.

define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    bic x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    bic x0, x9, x8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    bic x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl x8, x8, x2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %widenumlowbits = zext i8 %numlowbits to i64
  %notmask = shl nsw i64 -1, %widenumlowbits
  %mask = xor i64 %notmask, -1
  %wideres = and i64 %shiftedval, %mask
  %res = trunc i64 %wideres to i32
  ret i32 %res
}

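; Observation (not itself checked): pattern b lets the final and-not
; fold into a single 32-bit bic after the trunc, even though the
; inverted mask was built with a 64-bit lsl.
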
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %truncshiftedval = trunc i64 %shiftedval to i32
  %widenumlowbits = zext i8 %numlowbits to i32
  %notmask = shl nsw i32 -1, %widenumlowbits
  %mask = xor i32 %notmask, -1
  %res = and i32 %truncshiftedval, %mask
  ret i32 %res
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x9, x0, x1
; CHECK-NEXT:    lsl w8, w8, w2
; CHECK-NEXT:    bic w0, w9, w8
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %widenumlowbits = zext i8 %numlowbits to i32
  %notmask = shl nsw i32 -1, %widenumlowbits
  %mask = xor i32 %notmask, -1
  %zextmask = zext i32 %mask to i64
  %wideres = and i64 %shiftedval, %zextmask
  %res = trunc i64 %wideres to i32
  ret i32 %res
}

; ---------------------------------------------------------------------------- ;
; Pattern c. 32-bit
; ---------------------------------------------------------------------------- ;

define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w10, w0, w1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w10
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32 // =0x20
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w10, w0, w1
; CHECK-NEXT:    sub w8, w8, w2
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w10
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    mov w10, #-1 // =0xffffffff
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsr w10, w0, w1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w10, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit.

define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x10
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #64 // =0x40
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    sub w8, w8, w2
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x10
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    mov x10, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    mov x10, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x10, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x2
; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and w0, w8, w10
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  %res = trunc i64 %masked to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w10
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %truncshifted
  ret i32 %masked
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w2
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    lsr x10, x0, x1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w10
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %zextmask = zext i32 %mask to i64
  %masked = and i64 %zextmask, %shifted
  %truncmasked = trunc i64 %masked to i32
  ret i32 %truncmasked
}

; ---------------------------------------------------------------------------- ;
; Pattern d. 32-bit
; ---------------------------------------------------------------------------- ;

define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %shifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %shifted, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %shifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    mov w9, #32 // =0x20
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, ptr %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %shifted, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

; 64-bit.

define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %shifted, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    mov w9, #64 // =0x40
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, ptr %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %shifted, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  %res = trunc i64 %masked to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %truncshifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

; ---------------------------------------------------------------------------- ;
; Constant
; ---------------------------------------------------------------------------- ;

; https://bugs.llvm.org/show_bug.cgi?id=38938
define void @pr38938(ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: pr38938:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x1]
; CHECK-NEXT:    ubfx x8, x8, #21, #10
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    ldr w9, [x0, x8]
; CHECK-NEXT:    add w9, w9, #1
; CHECK-NEXT:    str w9, [x0, x8]
; CHECK-NEXT:    ret
  %tmp = load i64, ptr %a1, align 8
  %tmp1 = lshr i64 %tmp, 21
  %tmp2 = and i64 %tmp1, 1023
  %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2
  %tmp4 = load i32, ptr %tmp3, align 4
  %tmp5 = add nsw i32 %tmp4, 1
  store i32 %tmp5, ptr %tmp3, align 4
  ret void
}

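; Explanatory note (an interpretation of the checks above): the
; lshr-by-21 plus and-with-1023 collapses into ubfx x8, x8, #21, #10,
; i.e. a 10-bit field starting at bit 21, and the lsl #2 scales the
; extracted index for the i32 getelementptr addressing.
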
; The most canonical variant
define i32 @c0_i32(i32 %arg) nounwind {
; CHECK-LABEL: c0_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w0, w0, #19, #10
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  ret i32 %tmp1
}

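; Worked mask arithmetic (illustrative): 1023 = 0x3ff covers 10
; contiguous low bits, so lshr-by-19-then-mask is exactly ubfx with
; lsb = 19, width = 10.
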
; Should be still fine, but the mask is shifted
define i32 @c1_i32(i32 %arg) nounwind {
; CHECK-LABEL: c1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, #19
; CHECK-NEXT:    and w0, w8, #0xffc
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 4092
  ret i32 %tmp1
}

; Should be still fine, but the result is shifted left afterwards
define i32 @c2_i32(i32 %arg) nounwind {
; CHECK-LABEL: c2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    lsl w0, w8, #2
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  %tmp2 = shl i32 %tmp1, 2
  ret i32 %tmp2
}

; The mask covers a newly shifted-in bit
define i32 @c4_i32_bad(i32 %arg) nounwind {
; CHECK-LABEL: c4_i32_bad:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, #19
; CHECK-NEXT:    and w0, w8, #0x1ffe
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 16382
  ret i32 %tmp1
}

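; Observation (an interpretation of the checks above): the IR mask is
; 16382 = 0x3ffe, whose bit 13 is always a shifted-in zero after the
; 19-bit lshr of an i32, so codegen shrinks the immediate to 0x1ffe;
; because bit 0 is still excluded, no single ubfx applies.
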
; 64-bit.

; The most canonical variant
define i64 @c0_i64(i64 %arg) nounwind {
; CHECK-LABEL: c0_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x0, x0, #51, #10
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  ret i64 %tmp1
}

; Should be still fine, but the mask is shifted
define i64 @c1_i64(i64 %arg) nounwind {
; CHECK-LABEL: c1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #51
; CHECK-NEXT:    and x0, x8, #0xffc
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 4092
  ret i64 %tmp1
}

; Should be still fine, but the result is shifted left afterwards
define i64 @c2_i64(i64 %arg) nounwind {
; CHECK-LABEL: c2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    lsl x0, x8, #2
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  %tmp2 = shl i64 %tmp1, 2
  ret i64 %tmp2
}

; The mask covers a newly shifted-in bit
define i64 @c4_i64_bad(i64 %arg) nounwind {
; CHECK-LABEL: c4_i64_bad:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #51
; CHECK-NEXT:    and x0, x8, #0x1ffe
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 16382
  ret i64 %tmp1
}

; ---------------------------------------------------------------------------- ;
; Constant, storing the result afterwards.
; ---------------------------------------------------------------------------- ;

; 32-bit.

; The most canonical variant
define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c5_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  store i32 %tmp1, ptr %ptr
  ret void
}

; Should be still fine, but the mask is shifted
define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c6_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #12
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 4095
  store i32 %tmp1, ptr %ptr
  ret void
}

; Should be still fine, but the result is shifted left afterwards
define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c7_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    lsl w8, w8, #2
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  %tmp2 = shl i32 %tmp1, 2
  store i32 %tmp2, ptr %ptr
  ret void
}

; 64-bit.

; The most canonical variant
define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c5_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  store i64 %tmp1, ptr %ptr
  ret void
}

; Should be still fine, but the mask is shifted
define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c6_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #12
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 4095
  store i64 %tmp1, ptr %ptr
  ret void
}

; Should be still fine, but the result is shifted left afterwards
define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
; CHECK-LABEL: c7_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  %tmp2 = shl i64 %tmp1, 2
  store i64 %tmp2, ptr %ptr
  ret void
}