// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for __int128 (with default alignment of 8 bytes
// only), resulting in libcalls.
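
// Globals matching the @Ptr, @Ret, @Val, @Exp and @Des references in the
// CHECK lines below; <stdatomic.h> provides memory_order_seq_cst.
#include <stdatomic.h>

__int128 Ptr;
__int128 Ret;
__int128 Val;
__int128 Exp;
__int128 Des;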
// TODO: This test and several more below have an unnecessary alloca
// remaining. This is due to 369c9b7, which changed the behavior of the
// MemCpyOpt pass. It seems that a 'writable' attribute should now be added
// to the argument in order for this optimization to proceed.
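
// Note: the '_n' builtins take and return __int128 values directly, while
// the generic versions pass values through pointers.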
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: ret void
__int128 f1() {
  return __atomic_load_n(&Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f2() {
  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 8
// CHECK-NEXT: ret void
void f3() {
  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 8
// CHECK-NEXT: ret void
void f4() {
  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f5() {
  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
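// The generic __atomic_exchange writes the previous value through its third
// argument; the function returns it as well, hence the two stores above.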
__int128 f6() {
  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 8
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
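// The fourth argument (0) requests a strong compare-exchange; the last two
// arguments are the success and failure memory orders. On failure, the
// current value is stored back into Exp.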
_Bool f7() {
  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
                                     memory_order_seq_cst,
                                     memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 8
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
_Bool f8() {
  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
                                   memory_order_seq_cst,
                                   memory_order_seq_cst);
}

// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
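// The op_fetch builtins return the new value: atomicrmw yields the old value,
// so the IR reapplies the operation (here, the add of [[TMP0]]) to produce it.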
__int128 f9() {
  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f10() {
  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f11() {
  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f12() {
  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f13() {
  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
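// __atomic_nand_fetch computes ~(*Ptr & Val), which is why the new value is
// rebuilt above with an 'and' followed by 'xor -1'.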
__int128 f14() {
  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
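// Unlike the op_fetch forms above, the fetch_op builtins return the old
// value, i.e. the atomicrmw result directly, with no extra arithmetic.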
__int128 f15() {
  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f16() {
  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f17() {
  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f18() {
  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f19() {
  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
__int128 f20() {
  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
}