1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -aarch64-enable-subreg-liveness-tracking -mtriple=arm64-eabi < %s | FileCheck %s
5 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8
8 define void @t1(ptr %object) {
11 ; CHECK-NEXT: ldr xzr, [x0, #8]
13 %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 1
14 %tmp = load volatile i64, ptr %incdec.ptr, align 8
18 ; base + offset (> imm9)
19 define void @t2(ptr %object) {
22 ; CHECK-NEXT: sub x8, x0, #264
23 ; CHECK-NEXT: ldr xzr, [x8]
25 %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 -33
26 %tmp = load volatile i64, ptr %incdec.ptr, align 8
30 ; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
31 define void @t3(ptr %object) {
34 ; CHECK-NEXT: ldr xzr, [x0, #32760]
36 %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4095
37 %tmp = load volatile i64, ptr %incdec.ptr, align 8
41 ; base + unsigned offset (> imm12 * size of type in bytes)
42 define void @t4(ptr %object) {
45 ; CHECK-NEXT: mov w8, #32768 // =0x8000
46 ; CHECK-NEXT: ldr xzr, [x0, x8]
48 %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4096
49 %tmp = load volatile i64, ptr %incdec.ptr, align 8
54 define void @t5(i64 %a) {
57 ; CHECK-NEXT: adrp x8, object
58 ; CHECK-NEXT: add x8, x8, :lo12:object
59 ; CHECK-NEXT: ldr xzr, [x8, x0, lsl #3]
61 %incdec.ptr = getelementptr inbounds i64, ptr @object, i64 %a
62 %tmp = load volatile i64, ptr %incdec.ptr, align 8
67 define void @t6(i64 %a, ptr %object) {
70 ; CHECK-NEXT: add x8, x1, x0, lsl #3
71 ; CHECK-NEXT: mov w9, #32768 // =0x8000
72 ; CHECK-NEXT: ldr xzr, [x8, x9]
74 %tmp1 = getelementptr inbounds i64, ptr %object, i64 %a
75 %incdec.ptr = getelementptr inbounds i64, ptr %tmp1, i64 4096
76 %tmp = load volatile i64, ptr %incdec.ptr, align 8
80 ; Test base + wide immediate
81 define void @t7(i64 %a) {
84 ; CHECK-NEXT: mov w8, #65535 // =0xffff
85 ; CHECK-NEXT: ldr xzr, [x0, x8]
87 %1 = add i64 %a, 65535 ;0xffff
88 %2 = inttoptr i64 %1 to ptr
89 %3 = load volatile i64, ptr %2, align 8
93 define void @t8(i64 %a) {
96 ; CHECK-NEXT: mov x8, #-4662 // =0xffffffffffffedca
97 ; CHECK-NEXT: ldr xzr, [x0, x8]
99 %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca
100 %2 = inttoptr i64 %1 to ptr
101 %3 = load volatile i64, ptr %2, align 8
105 define void @t9(i64 %a) {
108 ; CHECK-NEXT: mov x8, #-305463297 // =0xffffffffedcaffff
109 ; CHECK-NEXT: ldr xzr, [x0, x8]
111 %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff
112 %2 = inttoptr i64 %1 to ptr
113 %3 = load volatile i64, ptr %2, align 8
117 define void @t10(i64 %a) {
120 ; CHECK-NEXT: mov x8, #81909218222800896 // =0x123000000000000
121 ; CHECK-NEXT: ldr xzr, [x0, x8]
123 %1 = add i64 %a, 81909218222800896 ;0x123000000000000
124 %2 = inttoptr i64 %1 to ptr
125 %3 = load volatile i64, ptr %2, align 8
129 define void @t11(i64 %a) {
132 ; CHECK-NEXT: mov w8, #17767 // =0x4567
133 ; CHECK-NEXT: movk w8, #291, lsl #16
134 ; CHECK-NEXT: ldr xzr, [x0, x8]
136 %1 = add i64 %a, 19088743 ;0x1234567
137 %2 = inttoptr i64 %1 to ptr
138 %3 = load volatile i64, ptr %2, align 8
142 ; Test some boundaries that should not use movz/movn/orr
143 define void @t12(i64 %a) {
146 ; CHECK-NEXT: add x8, x0, #4095
147 ; CHECK-NEXT: ldr xzr, [x8]
149 %1 = add i64 %a, 4095 ;0xfff
150 %2 = inttoptr i64 %1 to ptr
151 %3 = load volatile i64, ptr %2, align 8
155 define void @t13(i64 %a) {
158 ; CHECK-NEXT: sub x8, x0, #4095
159 ; CHECK-NEXT: ldr xzr, [x8]
161 %1 = add i64 %a, -4095 ;-0xfff
162 %2 = inttoptr i64 %1 to ptr
163 %3 = load volatile i64, ptr %2, align 8
167 define void @t14(i64 %a) {
170 ; CHECK-NEXT: add x8, x0, #291, lsl #12 // =1191936
171 ; CHECK-NEXT: ldr xzr, [x8]
173 %1 = add i64 %a, 1191936 ;0x123000
174 %2 = inttoptr i64 %1 to ptr
175 %3 = load volatile i64, ptr %2, align 8
179 define void @t15(i64 %a) {
182 ; CHECK-NEXT: sub x8, x0, #291, lsl #12 // =1191936
183 ; CHECK-NEXT: ldr xzr, [x8]
185 %1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000
186 %2 = inttoptr i64 %1 to ptr
187 %3 = load volatile i64, ptr %2, align 8
191 define void @t16(i64 %a) {
194 ; CHECK-NEXT: ldr xzr, [x0, #28672]
196 %1 = add i64 %a, 28672 ;0x7000
197 %2 = inttoptr i64 %1 to ptr
198 %3 = load volatile i64, ptr %2, align 8
202 define void @t17(i64 %a) {
205 ; CHECK-NEXT: ldur xzr, [x0, #-256]
207 %1 = add i64 %a, -256 ;-0x100
208 %2 = inttoptr i64 %1 to ptr
209 %3 = load volatile i64, ptr %2, align 8
214 define i8 @LdOffset_i8(ptr %a) {
215 ; CHECK-LABEL: LdOffset_i8:
217 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
218 ; CHECK-NEXT: ldrb w0, [x8, #3704]
220 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
221 %val = load i8, ptr %arrayidx, align 1
226 define i32 @LdOffset_i8_zext32(ptr %a) {
227 ; CHECK-LABEL: LdOffset_i8_zext32:
229 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
230 ; CHECK-NEXT: ldrb w0, [x8, #3704]
232 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
233 %val = load i8, ptr %arrayidx, align 1
234 %conv = zext i8 %val to i32
239 define i32 @LdOffset_i8_sext32(ptr %a) {
240 ; CHECK-LABEL: LdOffset_i8_sext32:
242 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
243 ; CHECK-NEXT: ldrsb w0, [x8, #3704]
245 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
246 %val = load i8, ptr %arrayidx, align 1
247 %conv = sext i8 %val to i32
252 define i64 @LdOffset_i8_zext64(ptr %a) {
253 ; CHECK-LABEL: LdOffset_i8_zext64:
255 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
256 ; CHECK-NEXT: ldrb w0, [x8, #3704]
258 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
259 %val = load i8, ptr %arrayidx, align 1
260 %conv = zext i8 %val to i64
265 define i64 @LdOffset_i8_sext64(ptr %a) {
266 ; CHECK-LABEL: LdOffset_i8_sext64:
268 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
269 ; CHECK-NEXT: ldrsb x0, [x8, #3704]
271 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
272 %val = load i8, ptr %arrayidx, align 1
273 %conv = sext i8 %val to i64
278 define i16 @LdOffset_i16(ptr %a) {
279 ; CHECK-LABEL: LdOffset_i16:
281 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
282 ; CHECK-NEXT: ldrh w0, [x8, #7408]
284 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
285 %val = load i16, ptr %arrayidx, align 2
290 define i32 @LdOffset_i16_zext32(ptr %a) {
291 ; CHECK-LABEL: LdOffset_i16_zext32:
293 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
294 ; CHECK-NEXT: ldrh w0, [x8, #7408]
296 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
297 %val = load i16, ptr %arrayidx, align 2
298 %conv = zext i16 %val to i32
303 define i32 @LdOffset_i16_sext32(ptr %a) {
304 ; CHECK-LABEL: LdOffset_i16_sext32:
306 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
307 ; CHECK-NEXT: ldrsh w0, [x8, #7408]
309 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
310 %val = load i16, ptr %arrayidx, align 2
311 %conv = sext i16 %val to i32
316 define i64 @LdOffset_i16_zext64(ptr %a) {
317 ; CHECK-LABEL: LdOffset_i16_zext64:
319 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
320 ; CHECK-NEXT: ldrh w0, [x8, #7408]
322 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
323 %val = load i16, ptr %arrayidx, align 2
324 %conv = zext i16 %val to i64
329 define i64 @LdOffset_i16_sext64(ptr %a) {
330 ; CHECK-LABEL: LdOffset_i16_sext64:
332 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
333 ; CHECK-NEXT: ldrsh x0, [x8, #7408]
335 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
336 %val = load i16, ptr %arrayidx, align 2
337 %conv = sext i16 %val to i64
342 define i32 @LdOffset_i32(ptr %a) {
343 ; CHECK-LABEL: LdOffset_i32:
345 ; CHECK-NEXT: add x8, x0, #1012, lsl #12 // =4145152
346 ; CHECK-NEXT: ldr w0, [x8, #14816]
348 %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
349 %val = load i32, ptr %arrayidx, align 4
354 define i64 @LdOffset_i32_zext64(ptr %a) {
355 ; CHECK-LABEL: LdOffset_i32_zext64:
357 ; CHECK-NEXT: add x8, x0, #1012, lsl #12 // =4145152
358 ; CHECK-NEXT: ldr w0, [x8, #14816]
360 %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
361 %val = load i32, ptr %arrayidx, align 2
362 %conv = zext i32 %val to i64
367 define i64 @LdOffset_i32_sext64(ptr %a) {
368 ; CHECK-LABEL: LdOffset_i32_sext64:
370 ; CHECK-NEXT: add x8, x0, #1012, lsl #12 // =4145152
371 ; CHECK-NEXT: ldrsw x0, [x8, #14816]
373 %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
374 %val = load i32, ptr %arrayidx, align 2
375 %conv = sext i32 %val to i64
380 define i64 @LdOffset_i64(ptr %a) {
381 ; CHECK-LABEL: LdOffset_i64:
383 ; CHECK-NEXT: add x8, x0, #2024, lsl #12 // =8290304
384 ; CHECK-NEXT: ldr x0, [x8, #29632]
386 %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
387 %val = load i64, ptr %arrayidx, align 4
392 define <2 x i32> @LdOffset_v2i32(ptr %a) {
393 ; CHECK-LABEL: LdOffset_v2i32:
395 ; CHECK-NEXT: add x8, x0, #2024, lsl #12 // =8290304
396 ; CHECK-NEXT: ldr d0, [x8, #29632]
398 %arrayidx = getelementptr inbounds <2 x i32>, ptr %a, i64 1039992
399 %val = load <2 x i32>, ptr %arrayidx, align 4
404 define <2 x i64> @LdOffset_v2i64(ptr %a) {
405 ; CHECK-LABEL: LdOffset_v2i64:
407 ; CHECK-NEXT: add x8, x0, #4048, lsl #12 // =16580608
408 ; CHECK-NEXT: ldr q0, [x8, #59264]
410 %arrayidx = getelementptr inbounds <2 x i64>, ptr %a, i64 1039992
411 %val = load <2 x i64>, ptr %arrayidx, align 4
416 define double @LdOffset_i8_f64(ptr %a) {
417 ; CHECK-LABEL: LdOffset_i8_f64:
419 ; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
420 ; CHECK-NEXT: ldrsb w8, [x8, #3704]
421 ; CHECK-NEXT: scvtf d0, w8
423 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
424 %val = load i8, ptr %arrayidx, align 1
425 %conv = sitofp i8 %val to double
430 define double @LdOffset_i16_f64(ptr %a) {
431 ; CHECK-LABEL: LdOffset_i16_f64:
433 ; CHECK-NEXT: add x8, x0, #506, lsl #12 // =2072576
434 ; CHECK-NEXT: ldrsh w8, [x8, #7408]
435 ; CHECK-NEXT: scvtf d0, w8
437 %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
438 %val = load i16, ptr %arrayidx, align 2
439 %conv = sitofp i16 %val to double
444 define double @LdOffset_i32_f64(ptr %a) {
445 ; CHECK-LABEL: LdOffset_i32_f64:
447 ; CHECK-NEXT: add x8, x0, #1012, lsl #12 // =4145152
448 ; CHECK-NEXT: ldr s0, [x8, #14816]
449 ; CHECK-NEXT: ucvtf d0, d0
451 %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
452 %val = load i32, ptr %arrayidx, align 4
453 %conv = uitofp i32 %val to double
458 define double @LdOffset_i64_f64(ptr %a) {
459 ; CHECK-LABEL: LdOffset_i64_f64:
461 ; CHECK-NEXT: add x8, x0, #2024, lsl #12 // =8290304
462 ; CHECK-NEXT: ldr d0, [x8, #29632]
463 ; CHECK-NEXT: scvtf d0, d0
465 %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
466 %val = load i64, ptr %arrayidx, align 8
467 %conv = sitofp i64 %val to double
471 define i64 @LdOffset_i64_multi_offset(ptr %a) {
472 ; CHECK-LABEL: LdOffset_i64_multi_offset:
474 ; CHECK-NEXT: add x8, x0, #2031, lsl #12 // =8318976
475 ; CHECK-NEXT: ldr x9, [x8, #960]
476 ; CHECK-NEXT: ldr x8, [x8, #3016]
477 ; CHECK-NEXT: add x0, x8, x9
479 %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
480 %val0 = load i64, ptr %arrayidx, align 8
481 %arrayidx1 = getelementptr inbounds i64, ptr %a, i64 1040249
482 %val1 = load i64, ptr %arrayidx1, align 8
483 %add = add nsw i64 %val1, %val0
487 define i64 @LdOffset_i64_multi_offset_with_commmon_base(ptr %a) {
488 ; CHECK-LABEL: LdOffset_i64_multi_offset_with_commmon_base:
490 ; CHECK-NEXT: add x8, x0, #507, lsl #12 // =2076672
491 ; CHECK-NEXT: ldr x9, [x8, #26464]
492 ; CHECK-NEXT: ldr x8, [x8, #26496]
493 ; CHECK-NEXT: add x0, x8, x9
495 %b = getelementptr inbounds i16, ptr %a, i64 1038336
496 %arrayidx = getelementptr inbounds i64, ptr %b, i64 3308
497 %val0 = load i64, ptr %arrayidx, align 8
498 %arrayidx1 = getelementptr inbounds i64, ptr %b, i64 3312
499 %val1 = load i64, ptr %arrayidx1, align 8
500 %add = add nsw i64 %val1, %val0
504 ; Negative test: the offset is odd
505 define i32 @LdOffset_i16_odd_offset(ptr nocapture noundef readonly %a) {
506 ; CHECK-LABEL: LdOffset_i16_odd_offset:
508 ; CHECK-NEXT: mov w8, #56953 // =0xde79
509 ; CHECK-NEXT: movk w8, #15, lsl #16
510 ; CHECK-NEXT: ldrsh w0, [x0, x8]
512 %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039993
513 %val = load i16, ptr %arrayidx, align 2
514 %conv = sext i16 %val to i32
518 ; The offset can already be materialized with a single mov (MOVNWi)
519 define i8 @LdOffset_i8_movnwi(ptr %a) {
520 ; CHECK-LABEL: LdOffset_i8_movnwi:
522 ; CHECK-NEXT: mov w8, #16777215 // =0xffffff
523 ; CHECK-NEXT: ldrb w0, [x0, x8]
525 %arrayidx = getelementptr inbounds i8, ptr %a, i64 16777215
526 %val = load i8, ptr %arrayidx, align 1
530 ; Negative test: the offset is too large to be encoded with an add
531 define i8 @LdOffset_i8_too_large(ptr %a) {
532 ; CHECK-LABEL: LdOffset_i8_too_large:
534 ; CHECK-NEXT: mov w8, #1 // =0x1
535 ; CHECK-NEXT: movk w8, #256, lsl #16
536 ; CHECK-NEXT: ldrb w0, [x0, x8]
538 %arrayidx = getelementptr inbounds i8, ptr %a, i64 16777217
539 %val = load i8, ptr %arrayidx, align 1