// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for __int128 (with default alignment of 8 bytes
// only), resulting in libcalls.

#include <stdatomic.h>
#include <stdint.h>

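// The "default alignment of 8 bytes" above is the s390x ABI alignment of
// __int128; the assertion below merely documents that assumption and emits
// no IR (illustrative only, not exercised by the CHECK lines).
_Static_assert(__alignof__(__int128) == 8,
               "__int128 defaults to 8-byte alignment on SystemZ");
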
__int128 Ptr;
__int128 Ret;
__int128 Val;
__int128 Exp;
__int128 Des;

// TODO: This test and several more below have the unnecessary use of an alloca
// remaining. This is due to 369c9b7, which changes the behavior of the MemCpyOpt
// pass. It seems that a 'writable' attribute should now be added to the argument
// in order for this optimization to proceed.

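// As a rough sketch of the above (hypothetical IR, neither produced nor
// checked by this test), the idea is that marking the sret parameter as
// writable, e.g.
//
//   define void @f1(ptr noalias writable sret(i128) align 8 %agg.result)
//
// would give MemCpyOpt the guarantee it needs to perform the elision again.
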
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: ret void
//
__int128 f1() {
  return __atomic_load_n(&Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 8
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f2() {
  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 8
// CHECK-NEXT: ret void
//
void f3() {
  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 8
// CHECK-NEXT: ret void
//
void f4() {
  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f5() {
  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f6() {
  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 8
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f7() {
  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 8
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8() {
  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f9() {
  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f10() {
  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f11() {
  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f12() {
  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f13() {
  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f14() {
  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f15() {
  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f16() {
  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f17() {
  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f18() {
  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f19() {
  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f20() {
  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
}