// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s

// Test GNU atomic builtins for int64_t.

#include <stdatomic.h>
#include <stdint.h>

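// f1/f2: __atomic_load_n returns the value directly; __atomic_load writes it
// through the Ret pointer. Both lower to a seq_cst "load atomic i64".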
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f1(int64_t *Ptr) {
  return __atomic_load_n(Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP0]], ptr [[RET:%.*]], align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f2(int64_t *Ptr, int64_t *Ret) {
  __atomic_load(Ptr, Ret, memory_order_seq_cst);
  return *Ret;
}

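// f3/f4: __atomic_store_n takes the value directly; __atomic_store reads it
// through a pointer. Both lower to a seq_cst "store atomic i64".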
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: store atomic i64 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT: ret void
void f3(int64_t *Ptr, int64_t Val) {
  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
// CHECK-NEXT: store atomic i64 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT: ret void
void f4(int64_t *Ptr, int64_t *Val) {
  __atomic_store(Ptr, Val, memory_order_seq_cst);
}

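// f5/f6: __atomic_exchange_n / __atomic_exchange lower to "atomicrmw xchg";
// the generic form also stores the old value through Ret.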
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f5(int64_t *Ptr, int64_t Val) {
  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr [[RET:%.*]], align 8
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f6(int64_t *Ptr, int64_t *Val, int64_t *Ret) {
  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
  return *Ret;
}

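// f7/f8: __atomic_compare_exchange_n / __atomic_compare_exchange (weak = 0)
// lower to "cmpxchg"; on failure the observed value is written back to Exp.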
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[DES:%.*]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
// CHECK-NEXT: store i64 [[TMP3]], ptr [[EXP]], align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP2]]
_Bool f7(int64_t *Ptr, int64_t *Exp, int64_t Des) {
  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[DES:%.*]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[TMP1]] seq_cst seq_cst, align 8
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
// CHECK-NEXT: store i64 [[TMP4]], ptr [[EXP]], align 8
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
_Bool f8(int64_t *Ptr, int64_t *Exp, int64_t *Des) {
  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}

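// f9-f14: the __atomic_<op>_fetch builtins return the new value, so the IR
// recomputes it by applying the operation to the "atomicrmw" result.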
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f9(int64_t *Ptr, int64_t Val) {
  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f10(int64_t *Ptr, int64_t Val) {
  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f11(int64_t *Ptr, int64_t Val) {
  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f12(int64_t *Ptr, int64_t Val) {
  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = or i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i64 [[TMP1]]
int64_t f13(int64_t *Ptr, int64_t Val) {
  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
}

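// f14: __atomic_nand_fetch computes ~(*Ptr & Val), so the new value is rebuilt
// from the "atomicrmw nand" result with an "and" followed by "xor ..., -1".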
// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
// CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], -1
// CHECK-NEXT: ret i64 [[TMP2]]
int64_t f14(int64_t *Ptr, int64_t Val) {
  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
}

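// f15-f20: the __atomic_fetch_<op> builtins return the original value, so the
// "atomicrmw" result is returned directly.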
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f15(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f16(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f17(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f18(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f19(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT: ret i64 [[TMP0]]
int64_t f20(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
}