// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for __int128 aligned to 16 bytes, which should be
// expanded to LLVM IR by the front end.
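//
// Since the operands carry the full 16-byte alignment, each builtin is
// expected to lower to a native i128 atomic sequence (load/store atomic,
// atomicrmw, cmpxchg) rather than an __atomic_* libcall, which is what the
// CHECK lines below assert.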

#include <stdatomic.h>

__int128 Ptr __attribute__((aligned(16)));
__int128 Ret __attribute__((aligned(16)));
__int128 Val __attribute__((aligned(16)));
__int128 Exp __attribute__((aligned(16)));
__int128 Des __attribute__((aligned(16)));

// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: ret void
//
__int128 f1() {
  return __atomic_load_n(&Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f2() {
  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f3() {
  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f4() {
  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f5() {
  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f6() {
  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
  return Ret;
}

// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
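// The 0 passed for the "weak" argument requests a strong compare-exchange;
// on failure, the old value returned by cmpxchg is stored back to @Exp
// (the cmpxchg.store_expected block above).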
//
_Bool f7() {
  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8() {
  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
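// The __atomic_<op>_fetch builtins return the updated value, so the IR
// reapplies the operation to the old value that atomicrmw returns.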
//
__int128 f9() {
  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f10() {
  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f11() {
  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f12() {
  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f13() {
  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
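// GCC defines nand as ~(val & arg), hence the and followed by xor -1 when
// recomputing the new value above.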
//
__int128 f14() {
  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
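// The __atomic_fetch_<op> builtins return the original value, which atomicrmw
// already yields, so no recomputation is needed.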
//
__int128 f15() {
  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f16() {
  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f17() {
  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f18() {
  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f19() {
  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f20() {
  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
}