// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for int16_t.

#include <stdatomic.h>
#include <stdint.h>
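// f1-f2: __atomic_load_n / __atomic_load, lowered to an atomic load.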
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f1(int16_t *Ptr) {
  return __atomic_load_n(Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: store i16 [[TMP0]], ptr [[RET:%.*]], align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f2(int16_t *Ptr, int16_t *Ret) {
  __atomic_load(Ptr, Ret, memory_order_seq_cst);
  return *Ret;
}
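// f3-f4: __atomic_store_n / __atomic_store, lowered to an atomic store.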
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: store atomic i16 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret void
//
void f3(int16_t *Ptr, int16_t Val) {
  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
// CHECK-NEXT: store atomic i16 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret void
//
void f4(int16_t *Ptr, int16_t *Val) {
  __atomic_store(Ptr, Val, memory_order_seq_cst);
}
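// f5-f6: __atomic_exchange_n / __atomic_exchange, lowered to atomicrmw xchg.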
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f5(int16_t *Ptr, int16_t Val) {
  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP0]] seq_cst, align 2
// CHECK-NEXT: store i16 [[TMP1]], ptr [[RET:%.*]], align 2
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f6(int16_t *Ptr, int16_t *Val, int16_t *Ret) {
  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
  return *Ret;
}
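// f7-f8: __atomic_compare_exchange_n / __atomic_compare_exchange, lowered to
// cmpxchg; on failure the loaded value is stored back to *Exp.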
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[DES:%.*]] seq_cst seq_cst, align 2
// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
// CHECK-NEXT: store i16 [[TMP3]], ptr [[EXP]], align 2
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP2]]
//
_Bool f7(int16_t *Ptr, int16_t *Exp, int16_t Des) {
  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[DES:%.*]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[TMP1]] seq_cst seq_cst, align 2
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i16, i1 } [[TMP2]], 0
// CHECK-NEXT: store i16 [[TMP4]], ptr [[EXP]], align 2
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8(int16_t *Ptr, int16_t *Exp, int16_t *Des) {
  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}
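// f9-f14: __atomic_<op>_fetch, lowered to atomicrmw followed by recomputing
// the new value.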
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = add i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f9(int16_t *Ptr, int16_t Val) {
  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f10(int16_t *Ptr, int16_t Val) {
  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f11(int16_t *Ptr, int16_t Val) {
  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f12(int16_t *Ptr, int16_t Val) {
  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = or i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f13(int16_t *Ptr, int16_t Val) {
  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], -1
// CHECK-NEXT: ret i16 [[TMP2]]
//
int16_t f14(int16_t *Ptr, int16_t Val) {
  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
}
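// f15-f20: __atomic_fetch_<op>, lowered to a bare atomicrmw returning the
// old value.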
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f15(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f16(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f17(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f18(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f19(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f20(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
}