// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-macosx10.9.0 | FileCheck %s
// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -triple=x86_64-apple-macosx10.9.0
// RUN: %clang_cc1 %s -include-pch %t -triple=x86_64-apple-macosx10.9.0 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED

// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*

typedef enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;

int fi1(_Atomic(int) *i) {
  // CHECK: @fi1
  // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK: @fi1a
  // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK: @fi1b
  // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
  return __atomic_load_n(i, memory_order_seq_cst);
}

void fi2(_Atomic(int) *i) {
  // CHECK: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

int fi3(_Atomic(int) *i) {
  // CHECK: @fi3
  // CHECK: atomicrmw and {{.*}} seq_cst, align 4
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK: @fi3a
  // CHECK: atomicrmw xor {{.*}} seq_cst, align 4
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK: @fi3b
  // CHECK: atomicrmw add {{.*}} seq_cst, align 4
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK: @fi3c
  // CHECK: atomicrmw nand {{.*}} seq_cst, align 4
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK: @fi3d
  // CHECK: atomicrmw nand {{.*}} seq_cst, align 4
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}
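
// Note on fi3c/fi3d above: 'atomicrmw nand' yields the value the memory held
// *before* the operation. __atomic_fetch_nand can return that old value
// directly, but __atomic_nand_fetch must recompute ~(old & val), which IRGen
// emits as an 'and' followed by an 'xor' with -1 -- exactly the extra
// instructions the fi3d CHECK lines expect.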

_Bool fi4(_Atomic(int) *i) {
  // CHECK: @fi4
  // CHECK: cmpxchg ptr {{.*}} acquire acquire, align 4
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK: @fi4a
  // CHECK: cmpxchg ptr {{.*}} acquire acquire, align 4
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK: @fi4b
  // CHECK: cmpxchg weak ptr {{.*}} acquire acquire, align 4
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

float ff1(_Atomic(float) *d) {
  // CHECK: @ff1
  // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK: @ff2
  // CHECK: store atomic i32 {{.*}} release, align 4
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}

int* fp1(_Atomic(int*) *p) {
  // CHECK: @fp1
  // CHECK: load atomic i64, ptr {{.*}} seq_cst, align 8
  return __c11_atomic_load(p, memory_order_seq_cst);
}

int* fp2(_Atomic(int*) *p) {
  // CHECK: @fp2
  // CHECK: store i64 4
  // CHECK: atomicrmw add {{.*}} monotonic, align 8
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: store i64 4
  // CHECK: atomicrmw sub {{.*}} monotonic, align 8
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
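
// A minimal illustration of the note above (added as a sketch; fp2b is
// hypothetical and deliberately has no CHECK lines): the C11 builtin scales
// its operand by sizeof(*p), while the GNU builtin treats the operand as a
// raw byte count, so both calls below move the stored pointer by one int
// (4 bytes on this target).
int *fp2b(_Atomic(int*) *p, int **q) {
  __c11_atomic_fetch_add(p, 1, memory_order_relaxed);    // 1 element == 4 bytes
  return __atomic_fetch_add(q, 4, memory_order_relaxed); // 4 bytes, explicitly
}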

_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK: @fc
  // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 8
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK: @fs
  // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK: @fsa
  // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK: @fsb
  // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 1
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

char flag1;
volatile char flag2;
void test_and_set(void) {
  // CHECK: atomicrmw xchg ptr @flag1, i8 1 seq_cst, align 1
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg ptr @flag2, i8 1 acquire, align 1
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, ptr @flag2 release, align 1
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, ptr @flag1 seq_cst, align 1
  __atomic_clear(&flag1, memory_order_seq_cst);
}

struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

struct Incomplete;

int lock_free(struct Incomplete *incomplete) {
  // CHECK: @lock_free

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 3, ptr noundef null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 17, ptr noundef {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}
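
// Note on the final check above: on this target a naturally aligned 4-byte
// int is statically known to be lock-free, so the return value folds to the
// constant 'ret i32 1' at compile time, whereas the oversized (17-byte) and
// unknown-alignment queries must fall back to the runtime
// __atomic_is_lock_free library call checked earlier.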

// Tests for atomic operations on big values. These should call the functions
// defined here:
// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
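
// For reference, the generic size-parameterized entry points described in
// that document have roughly the following shapes (a summary for the reader,
// not declarations this test relies on):
//   void __atomic_load(size_t size, void *mem, void *ret, int order);
//   void __atomic_store(size_t size, void *mem, void *val, int order);
//   void __atomic_exchange(size_t size, void *mem, void *val, void *ret,
//                          int order);
//   bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
//                                  void *desired, int success, int failure);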

struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

void structAtomicStore(void) {
  // CHECK: @structAtomicStore
  struct foo f = {0};
  __c11_atomic_store(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_store(i64 noundef 512, ptr noundef @bigAtomic,

  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i64 noundef 3, ptr noundef @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i64 noundef 512, ptr noundef @bigThing
}
void structAtomicLoad(void) {
  // CHECK: @structAtomicLoad
  struct foo f = __c11_atomic_load(&bigAtomic, 5);
  // CHECK: call void @__atomic_load(i64 noundef 512, ptr noundef @bigAtomic,

  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef @smallThing

  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i64 noundef 512, ptr noundef @bigThing
}
struct foo structAtomicExchange(void) {
  // CHECK: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i64 noundef 512, {{.*}}, ptr noundef @bigThing,

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i64 noundef 512, ptr noundef @bigAtomic,
}
int structAtomicCmpExchange(void) {
  // CHECK: @structAtomicCmpExchange
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 512, ptr noundef @bigAtomic,
}

// Check that no atomic operations are used in any initialisation of _Atomic
// types.
_Atomic(int) atomic_init_i = 42;

// CHECK: @atomic_init_foo
void atomic_init_foo(void)
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}
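
// Rationale for the checks above: at the point of initialization no other
// thread can legally observe the object yet, so IRGen may use a plain
// non-atomic 'store' for both the initializer form and __c11_atomic_init.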

// Check this doesn't crash
// CHECK: @test_atomic_array_param(
void test_atomic_array_param(_Atomic(struct foo) a) {
  test_atomic_array_param(a);
}

#endif