// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 3
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s | \
// RUN:   FileCheck %s --check-prefix=CHECK-X86
// RUN: %clang_cc1 -triple ppc64le-linux-gnu -emit-llvm -o - %s | FileCheck %s \
// RUN:   --check-prefix=CHECK-PPC
// RUN: %clang_cc1 -triple riscv32-linux-gnu -emit-llvm -o - %s | FileCheck %s \
// RUN:   --check-prefix=CHECK-RV32
// RUN: %clang_cc1 -triple riscv64-linux-gnu -emit-llvm -o - %s | FileCheck %s \
// RUN:   --check-prefix=CHECK-RV64
// Test that the structure definition, the GEP offsets, the name of the
// global, the bit extraction, and the icmp are all correct.
extern void a(const char *);
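
// On x86, each __builtin_cpu_supports("feat") call is expected to lower to a
// load from @__cpu_model (or from the @__cpu_features2 array for newer
// features), an 'and' with the feature's bit mask, and an 'icmp eq' against
// that same mask. As a rough sketch of the shape being checked (the
// autogenerated assertions below are authoritative), "sse4.2" becomes:
//
//   %feat = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 3, i32 0)
//   %bit  = and i32 %feat, 256
//   %set  = icmp eq i32 %bit, 256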
// CHECK-X86-LABEL: define dso_local i32 @main(
// CHECK-X86-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-X86-NEXT: call void @__cpu_indicator_init()
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 3, i32 0), align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 256
// CHECK-X86-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 256
// CHECK-X86-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-X86-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-X86: if.then:
// CHECK-X86-NEXT: call void @a(ptr noundef @.str)
// CHECK-X86-NEXT: br label [[IF_END]]
// CHECK-X86: if.end:
// CHECK-X86-NEXT: [[TMP4:%.*]] = load i32, ptr @__cpu_features2, align 4
// CHECK-X86-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 1
// CHECK-X86-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP5]], 1
// CHECK-X86-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
// CHECK-X86-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_END2:%.*]]
// CHECK-X86: if.then1:
// CHECK-X86-NEXT: call void @a(ptr noundef @.str.1)
// CHECK-X86-NEXT: br label [[IF_END2]]
// CHECK-X86: if.end2:
// CHECK-X86-NEXT: ret i32 0
//
int main(void) {
  __builtin_cpu_init();

  // CHECK: call void @__cpu_indicator_init

  if (__builtin_cpu_supports("sse4.2"))
    a("sse4.2");

  if (__builtin_cpu_supports("gfni"))
    a("gfni");

  return 0;
}

// CHECK-X86-LABEL: define dso_local i32 @baseline(
// CHECK-X86-SAME: ) #[[ATTR0]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ([3 x i32], ptr @__cpu_features2, i32 0, i32 1), align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], -2147483648
// CHECK-X86-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], -2147483648
// CHECK-X86-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-X86-NEXT: [[CONV:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-X86-NEXT: ret i32 [[CONV]]
//
int baseline() { return __builtin_cpu_supports("x86-64"); }
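
// The x86-64 micro-architecture levels ("x86-64" above, "x86-64-v2" through
// "x86-64-v4" below) follow the same load/and/icmp pattern; only the
// @__cpu_features2 element index and the bit mask differ between levels, as
// the checks for @v2, @v3 and @v4 show.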
// CHECK-X86-LABEL: define dso_local i32 @v2(
// CHECK-X86-SAME: ) #[[ATTR0]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ([3 x i32], ptr @__cpu_features2, i32 0, i32 2), align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 1
// CHECK-X86-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 1
// CHECK-X86-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-X86-NEXT: [[CONV:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-X86-NEXT: ret i32 [[CONV]]
//
int v2() { return __builtin_cpu_supports("x86-64-v2"); }

// CHECK-X86-LABEL: define dso_local i32 @v3(
// CHECK-X86-SAME: ) #[[ATTR0]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ([3 x i32], ptr @__cpu_features2, i32 0, i32 2), align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 2
// CHECK-X86-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 2
// CHECK-X86-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-X86-NEXT: [[CONV:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-X86-NEXT: ret i32 [[CONV]]
//
int v3() { return __builtin_cpu_supports("x86-64-v3"); }

// CHECK-X86-LABEL: define dso_local i32 @v4(
// CHECK-X86-SAME: ) #[[ATTR0]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ([3 x i32], ptr @__cpu_features2, i32 0, i32 2), align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 4
// CHECK-X86-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 4
// CHECK-X86-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-X86-NEXT: [[CONV:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-X86-NEXT: ret i32 [[CONV]]
//
int v4() { return __builtin_cpu_supports("x86-64-v4"); }
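
// On PowerPC, __builtin_cpu_supports and __builtin_cpu_is are not expected to
// read a userspace global; per the checks below they lower to
// @llvm.ppc.fixed.addr.ld calls whose operand selects the word to read
// (1 = HWCAP, 2 = HWCAP2, 3 = CPUID, matching the comments in test_ppc).
// A rough sketch of the two forms, based on the arch_3_00 and power7 branches:
//
//   %hwcap2 = call i32 @llvm.ppc.fixed.addr.ld(i32 2)   ; __builtin_cpu_supports("arch_3_00")
//   %bit    = and i32 %hwcap2, 8388608
//   %set    = icmp ne i32 %bit, 0
//
//   %cpuid  = call i32 @llvm.ppc.fixed.addr.ld(i32 3)   ; __builtin_cpu_is("power7")
//   %is     = icmp eq i32 %cpuid, 39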
// CHECK-PPC-LABEL: define dso_local signext i32 @test_ppc(
// CHECK-PPC-SAME: i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-PPC-NEXT: entry:
// CHECK-PPC-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-PPC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-PPC-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[CPU_SUPPORTS:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 2)
// CHECK-PPC-NEXT: [[TMP0:%.*]] = and i32 [[CPU_SUPPORTS]], 8388608
// CHECK-PPC-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK-PPC-NEXT: br i1 [[TMP1]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-PPC: if.then:
// CHECK-PPC-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: store i32 [[TMP2]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN:%.*]]
// CHECK-PPC: if.else:
// CHECK-PPC-NEXT: [[CPU_SUPPORTS1:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 1)
// CHECK-PPC-NEXT: [[TMP3:%.*]] = and i32 [[CPU_SUPPORTS1]], 67108864
// CHECK-PPC-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK-PPC-NEXT: br i1 [[TMP4]], label [[IF_THEN2:%.*]], label [[IF_ELSE3:%.*]]
// CHECK-PPC: if.then2:
// CHECK-PPC-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 5
// CHECK-PPC-NEXT: store i32 [[SUB]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else3:
// CHECK-PPC-NEXT: [[CPU_IS:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP6:%.*]] = icmp eq i32 [[CPU_IS]], 39
// CHECK-PPC-NEXT: br i1 [[TMP6]], label [[IF_THEN4:%.*]], label [[IF_ELSE5:%.*]]
// CHECK-PPC: if.then4:
// CHECK-PPC-NEXT: [[TMP7:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK-PPC-NEXT: store i32 [[ADD]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else5:
// CHECK-PPC-NEXT: [[CPU_IS6:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP9:%.*]] = icmp eq i32 [[CPU_IS6]], 39
// CHECK-PPC-NEXT: br i1 [[TMP9]], label [[IF_THEN7:%.*]], label [[IF_ELSE8:%.*]]
// CHECK-PPC: if.then7:
// CHECK-PPC-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK-PPC-NEXT: store i32 [[MUL]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else8:
// CHECK-PPC-NEXT: [[CPU_IS9:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP11:%.*]] = icmp eq i32 [[CPU_IS9]], 33
// CHECK-PPC-NEXT: br i1 [[TMP11]], label [[IF_THEN10:%.*]], label [[IF_ELSE12:%.*]]
// CHECK-PPC: if.then10:
// CHECK-PPC-NEXT: [[TMP12:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP12]], 4
// CHECK-PPC-NEXT: store i32 [[MUL11]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else12:
// CHECK-PPC-NEXT: [[CPU_IS13:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP13:%.*]] = icmp eq i32 [[CPU_IS13]], 45
// CHECK-PPC-NEXT: br i1 [[TMP13]], label [[IF_THEN14:%.*]], label [[IF_ELSE16:%.*]]
// CHECK-PPC: if.then14:
// CHECK-PPC-NEXT: [[TMP14:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP14]], 3
// CHECK-PPC-NEXT: store i32 [[ADD15]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else16:
// CHECK-PPC-NEXT: [[CPU_IS17:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP15:%.*]] = icmp eq i32 [[CPU_IS17]], 46
// CHECK-PPC-NEXT: br i1 [[TMP15]], label [[IF_THEN18:%.*]], label [[IF_ELSE20:%.*]]
// CHECK-PPC: if.then18:
// CHECK-PPC-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP16]], 3
// CHECK-PPC-NEXT: store i32 [[SUB19]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else20:
// CHECK-PPC-NEXT: [[CPU_IS21:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP17:%.*]] = icmp eq i32 [[CPU_IS21]], 47
// CHECK-PPC-NEXT: br i1 [[TMP17]], label [[IF_THEN22:%.*]], label [[IF_ELSE24:%.*]]
// CHECK-PPC: if.then22:
// CHECK-PPC-NEXT: [[TMP18:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP18]], 7
// CHECK-PPC-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.else24:
// CHECK-PPC-NEXT: [[CPU_IS25:%.*]] = call i32 @llvm.ppc.fixed.addr.ld(i32 3)
// CHECK-PPC-NEXT: [[TMP19:%.*]] = icmp eq i32 [[CPU_IS25]], 48
// CHECK-PPC-NEXT: br i1 [[TMP19]], label [[IF_THEN26:%.*]], label [[IF_END:%.*]]
// CHECK-PPC: if.then26:
// CHECK-PPC-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 7
// CHECK-PPC-NEXT: store i32 [[SUB27]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: if.end:
// CHECK-PPC-NEXT: br label [[IF_END28:%.*]]
// CHECK-PPC: if.end28:
// CHECK-PPC-NEXT: br label [[IF_END29:%.*]]
// CHECK-PPC: if.end29:
// CHECK-PPC-NEXT: br label [[IF_END30:%.*]]
// CHECK-PPC: if.end30:
// CHECK-PPC-NEXT: br label [[IF_END31:%.*]]
// CHECK-PPC: if.end31:
// CHECK-PPC-NEXT: br label [[IF_END32:%.*]]
// CHECK-PPC: if.end32:
// CHECK-PPC-NEXT: br label [[IF_END33:%.*]]
// CHECK-PPC: if.end33:
// CHECK-PPC-NEXT: br label [[IF_END34:%.*]]
// CHECK-PPC: if.end34:
// CHECK-PPC-NEXT: br label [[IF_END35:%.*]]
// CHECK-PPC: if.end35:
// CHECK-PPC-NEXT: [[TMP21:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-PPC-NEXT: [[ADD36:%.*]] = add nsw i32 [[TMP21]], 5
// CHECK-PPC-NEXT: store i32 [[ADD36]], ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: br label [[RETURN]]
// CHECK-PPC: return:
// CHECK-PPC-NEXT: [[TMP22:%.*]] = load i32, ptr [[RETVAL]], align 4
// CHECK-PPC-NEXT: ret i32 [[TMP22]]
//
int test_ppc(int a) {
  if (__builtin_cpu_supports("arch_3_00")) // HWCAP2
    return a;
  else if (__builtin_cpu_supports("mmu")) // HWCAP
    return a - 5;
  else if (__builtin_cpu_is("power7")) // CPUID
    return a + a;
  else if (__builtin_cpu_is("pwr7")) // CPUID
    return a * 3;
  else if (__builtin_cpu_is("ppc970")) // CPUID
    return a * 4;
  else if (__builtin_cpu_is("power8"))
    return a + 3;
  else if (__builtin_cpu_is("power9"))
    return a - 3;
  else if (__builtin_cpu_is("power10"))
    return a + 7;
  else if (__builtin_cpu_is("power11"))
    return a - 7;
  return a + 5;
}
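
// On RISC-V, __builtin_cpu_init() is expected to lower to a call to
// @__init_riscv_feature_bits(ptr null), and each __builtin_cpu_supports("ext")
// then loads a 64-bit group out of @__riscv_feature_bits and tests a single
// bit, mirroring the x86 load/and/icmp pattern. A rough sketch for the "a"
// check (the RV32/RV64 assertions below are authoritative):
//
//   call void @__init_riscv_feature_bits(ptr null)
//   %grp0 = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0)
//   %bit  = and i64 %grp0, 1
//   %set  = icmp eq i64 %bit, 1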
// CHECK-RV32-LABEL: define dso_local i32 @test_riscv(
// CHECK-RV32-SAME: i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-RV32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-RV32-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-RV32-NEXT: call void @__init_riscv_feature_bits(ptr null)
// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV32-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
// CHECK-RV32-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV32: if.then:
// CHECK-RV32-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN:%.*]]
// CHECK-RV32: if.else:
// CHECK-RV32-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV32-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV32: if.then1:
// CHECK-RV32-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else2:
// CHECK-RV32-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV32-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV32: if.then3:
// CHECK-RV32-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else4:
// CHECK-RV32-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
// CHECK-RV32-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
// CHECK-RV32-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
// CHECK-RV32-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV32: if.then5:
// CHECK-RV32-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.end:
// CHECK-RV32-NEXT: br label [[IF_END6:%.*]]
// CHECK-RV32: if.end6:
// CHECK-RV32-NEXT: br label [[IF_END7:%.*]]
// CHECK-RV32: if.end7:
// CHECK-RV32-NEXT: br label [[IF_END8:%.*]]
// CHECK-RV32: if.end8:
// CHECK-RV32-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: return:
// CHECK-RV32-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: ret i32 [[TMP12]]
//
// CHECK-RV64-LABEL: define dso_local signext i32 @test_riscv(
// CHECK-RV64-SAME: i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-RV64-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-RV64-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-RV64-NEXT: call void @__init_riscv_feature_bits(ptr null)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV64-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
// CHECK-RV64-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV64: if.then:
// CHECK-RV64-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN:%.*]]
// CHECK-RV64: if.else:
// CHECK-RV64-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV64-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV64: if.then1:
// CHECK-RV64-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else2:
// CHECK-RV64-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV64-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV64: if.then3:
// CHECK-RV64-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else4:
// CHECK-RV64-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
// CHECK-RV64-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
// CHECK-RV64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
// CHECK-RV64-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV64: if.then5:
// CHECK-RV64-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.end:
// CHECK-RV64-NEXT: br label [[IF_END6:%.*]]
// CHECK-RV64: if.end6:
// CHECK-RV64-NEXT: br label [[IF_END7:%.*]]
// CHECK-RV64: if.end7:
// CHECK-RV64-NEXT: br label [[IF_END8:%.*]]
// CHECK-RV64: if.end8:
// CHECK-RV64-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: return:
// CHECK-RV64-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: ret i32 [[TMP12]]
//
int test_riscv(int a) {
  __builtin_cpu_init();
  if (__builtin_cpu_supports("a"))
    return 3;
  else if (__builtin_cpu_supports("c"))
    return 7;
  else if (__builtin_cpu_supports("v"))
    return 11;
  else if (__builtin_cpu_supports("zcb"))