; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
define i8 @test_64bit_add(ptr %a, i64 %b) {
; CHECK-LABEL: test_64bit_add:
; CHECK: ldrh w0, [x0, x1, lsl #1]
  %tmp1 = getelementptr inbounds i16, ptr %a, i64 %b
  %tmp2 = load i16, ptr %tmp1
  %tmp3 = trunc i16 %tmp2 to i8
  ret i8 %tmp3
}
; These tests form sext and zext operations that never leave i64 space, to
; make sure LLVM can fold the 32-bit offset into the extended register-offset
; addressing modes (sxtw/uxtw).
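; A minimal sketch of the two idioms used throughout (a hypothetical helper,
; illustrative only and not exercised by any CHECK line): the shl/ashr-by-32
; pair reconstructs sext of the low 32 bits, and masking with 0xffffffff
; reconstructs zext, all within i64.
define i64 @example_extend_idioms(i64 %offset) {
  %sext.tmp = shl i64 %offset, 32
  %sext = ashr i64 %sext.tmp, 32      ; sext(trunc(%offset to i32)) back to i64
  %zext = and i64 %offset, 4294967295 ; zext(trunc(%offset to i32)) back to i64
  %sum = add i64 %sext, %zext
  ret i64 %sum
}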
define void @ldst_8bit(ptr %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_8bit:

; Sign-extended 32-bit offset: shl/ashr by 32 forms the sxtw operand.
  %off32.sext.tmp = shl i64 %offset, 32
  %off32.sext = ashr i64 %off32.sext.tmp, 32
  %addr8_sxtw = getelementptr i8, ptr %base, i64 %off32.sext
  %val8_sxtw = load volatile i8, ptr %addr8_sxtw
  %val32_signed = sext i8 %val8_sxtw to i32
  store volatile i32 %val32_signed, ptr @var_32bit
; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

; Zero-extended 32-bit offset: masking with 0xffffffff forms the uxtw operand.
  %addrint_uxtw = ptrtoint ptr %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
  %val8_uxtw = load volatile i8, ptr %addr_uxtw
  %newval8 = add i8 %val8_uxtw, 1
  store volatile i8 %newval8, ptr @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  ret void
}
define void @ldst_16bit(ptr %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_16bit:

; Zero-extended 32-bit offset (uxtw).
  %addrint_uxtw = ptrtoint ptr %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
  %val16_uxtw = load volatile i16, ptr %addr_uxtw
  %newval16 = add i16 %val16_uxtw, 1
  store volatile i16 %newval16, ptr @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

; Sign-extended 32-bit offset (sxtw).
  %base_sxtw = ptrtoint ptr %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
  %val16_sxtw = load volatile i16, ptr %addr_sxtw
  %val64_signed = sext i16 %val16_sxtw to i64
  store volatile i64 %val64_signed, ptr @var_64bit
; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

; Zero-extended 32-bit offset, scaled by the access size (uxtw #1).
  %base_uxtwN = ptrtoint ptr %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 1
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
  %val32 = load volatile i32, ptr @var_32bit
  %val16_trunc32 = trunc i32 %val32 to i16
  store volatile i16 %val16_trunc32, ptr %addr_uxtwN
; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]

  ret void
}
define void @ldst_32bit(ptr %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_32bit:

; Zero-extended 32-bit offset (uxtw).
  %addrint_uxtw = ptrtoint ptr %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
  %val32_uxtw = load volatile i32, ptr %addr_uxtw
  %newval32 = add i32 %val32_uxtw, 1
  store volatile i32 %newval32, ptr @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

; Sign-extended 32-bit offset (sxtw).
  %base_sxtw = ptrtoint ptr %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
  %val32_sxtw = load volatile i32, ptr %addr_sxtw
  %val64_signed = sext i32 %val32_sxtw to i64
  store volatile i64 %val64_signed, ptr @var_64bit
; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

; Zero-extended 32-bit offset, scaled by the access size (uxtw #2).
  %base_uxtwN = ptrtoint ptr %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 2
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
  %val32 = load volatile i32, ptr @var_32bit
  store volatile i32 %val32, ptr %addr_uxtwN
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]

  ret void
}
define void @ldst_64bit(ptr %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_64bit:

; Zero-extended 32-bit offset (uxtw).
  %addrint_uxtw = ptrtoint ptr %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
  %val64_uxtw = load volatile i64, ptr %addr_uxtw
  %newval64 = add i64 %val64_uxtw, 1
  store volatile i64 %newval64, ptr @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

; Sign-extended 32-bit offset (sxtw).
  %base_sxtw = ptrtoint ptr %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
  %val64_sxtw = load volatile i64, ptr %addr_sxtw
  store volatile i64 %val64_sxtw, ptr @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

; Zero-extended 32-bit offset, scaled by the access size (uxtw #3).
  %base_uxtwN = ptrtoint ptr %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 3
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
  %val64 = load volatile i64, ptr @var_64bit
  store volatile i64 %val64, ptr %addr_uxtwN
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]

  ret void
}
@var_8bit = global i8 0
@var_16bit = global i16 0
@var_32bit = global i32 0
@var_64bit = global i64 0