// RUN: %clang_cc1 -emit-llvm -o %t %s
// RUN: not grep __builtin %t
// RUN: %clang_cc1 -emit-llvm -triple x86_64-darwin-apple -o - %s | FileCheck %s

int printf(const char *, ...);
void p(char *str, int x) {
  printf("%s: %d\n", str, x);
}
void q(char *str, double x) {
  printf("%s: %f\n", str, x);
}
void r(char *str, void *ptr) {
  printf("%s: %p\n", str, ptr);
}
#define P(n,args) p(#n #args, __builtin_##n args)
#define Q(n,args) q(#n #args, __builtin_##n args)
#define R(n,args) r(#n #args, __builtin_##n args)
#define V(n,args) p(#n #args, (__builtin_##n args, 0))
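// For illustration (not part of the original test): P(types_compatible_p, (int, float))
// expands to
//   p("types_compatible_p(int, float)", __builtin_types_compatible_p (int, float));
// so each macro prints the stringized call next to the builtin's value. V wraps
// void-returning builtins in a comma expression so the result can be printed as 0.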
int random(void);

int main(void) {
  int N = random();
  P(types_compatible_p, (int, float));
  P(choose_expr, (0, 10, 20));
  P(constant_p, (sizeof(10)));
  P(expect, (N == 12, 0));
  V(prefetch, (&N, 1, 0));
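  // __builtin_fpclassify's first five arguments are the values it returns for
  // NaN, infinite, normal, subnormal, and zero operands, respectively.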
  P(fpclassify, (0, 1, 2, 3, 4, 1.0));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0l));
  P(isgreater, (1., 2.));
  P(isgreaterequal, (1., 2.));
  P(isless, (1., 2.));
  P(islessequal, (1., 2.));
  P(islessgreater, (1., 2.));
  P(isunordered, (1., 2.));
  P(isfpclass, (1., 1));
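  // fmaximum_num/fminimum_num implement IEEE 754-2019 maximumNumber/minimumNumber:
  // if exactly one operand is a NaN, the numeric operand is returned.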
  Q(fmaximum_num, (1.0, 2.0));
  Q(fmaximum_numf, (1.0, 2.0));
  Q(fmaximum_numl, (1.0, 2.0));
  Q(fminimum_num, (1.0, 2.0));
  Q(fminimum_numf, (1.0, 2.0));
  Q(fminimum_numl, (1.0, 2.0));
  // Bitwise & Numeric Functions
  int a, b, n = random(); // Avoid optimizing out.
  char s0[10], s1[] = "Hello";
  V(strncat, (s0, s1, n));
  V(strchr, (s0, s1[0]));
  V(strrchr, (s0, s1[0]));
  V(strncpy, (s0, s1, n));
  V(sprintf, (s0, "%s", s1));
  V(snprintf, (s0, n, "%s", s1));
  // Object size checking
  V(__memset_chk, (s0, 0, sizeof s0, n));
  V(__memcpy_chk, (s0, s1, sizeof s0, n));
  V(__memmove_chk, (s0, s1, sizeof s0, n));
  V(__mempcpy_chk, (s0, s1, sizeof s0, n));
  V(__strncpy_chk, (s0, s1, sizeof s0, n));
  V(__strcpy_chk, (s0, s1, n));
  V(__strcat_chk, (s0, s1, n));
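  // __builtin_object_size's second argument selects the mode: types 0 and 1
  // give an upper bound on the remaining bytes, types 2 and 3 a lower bound,
  // and the odd types measure the closest enclosing subobject rather than
  // the whole allocation.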
  P(object_size, (s0, 0));
  P(object_size, (s0, 1));
  P(object_size, (s0, 2));
  P(object_size, (s0, 3));
  // CHECK: @llvm.bitreverse.i8
  // CHECK: @llvm.bitreverse.i16
  // CHECK: @llvm.bitreverse.i32
  // CHECK: @llvm.bitreverse.i64
  P(bitreverse8, (N));
  P(bitreverse16, (N));
  P(bitreverse32, (N));
  P(bitreverse64, (N));
  // V(clear_cache, (&N, &N+1));
  R(extract_return_addr, (&N));

  __builtin_strcat(0, 0);
}
// CHECK-LABEL: define{{.*}} void @bar(
void bar(void) {
  float f;
  double d;
  long double ld;

  // LLVM's hex representation of float constants is really unfortunate;
  // basically it does a float-to-double "conversion" and then prints the
  // hex form of that. That gives us weird artifacts like exponents
  // that aren't numerically similar to the original exponent and
  // significand bit-patterns that are offset by three bits (because
  // the exponent was expanded from 8 bits to 11).
  //
  // 0xAE98 == 1010111010011000
  // 0x15D3 == 1010111010011
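  //
  // For example (illustration only): __builtin_nanf("0xAE98") is the f32 quiet
  // NaN with bits 0x7FC0AE98; printed via the double conversion it becomes
  // 0x7FF815D300000000, where the payload reappears shifted by those three
  // bits: 0xAE98 >> 3 == 0x15D3.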
  f = __builtin_huge_valf();      // CHECK: float 0x7FF0000000000000
  d = __builtin_huge_val();       // CHECK: double 0x7FF0000000000000
  ld = __builtin_huge_vall();     // CHECK: x86_fp80 0xK7FFF8000000000000000
  f = __builtin_nanf("");         // CHECK: float 0x7FF8000000000000
  d = __builtin_nan("");          // CHECK: double 0x7FF8000000000000
  ld = __builtin_nanl("");        // CHECK: x86_fp80 0xK7FFFC000000000000000
  f = __builtin_nanf("0xAE98");   // CHECK: float 0x7FF815D300000000
  d = __builtin_nan("0xAE98");    // CHECK: double 0x7FF800000000AE98
  ld = __builtin_nanl("0xAE98");  // CHECK: x86_fp80 0xK7FFFC00000000000AE98
  f = __builtin_nansf("");        // CHECK: float 0x7FF4000000000000
  d = __builtin_nans("");         // CHECK: double 0x7FF4000000000000
  ld = __builtin_nansl("");       // CHECK: x86_fp80 0xK7FFFA000000000000000
  f = __builtin_nansf("0xAE98");  // CHECK: float 0x7FF015D300000000
  d = __builtin_nans("0xAE98");   // CHECK: double 0x7FF000000000AE98
  ld = __builtin_nansl("0xAE98"); // CHECK: x86_fp80 0xK7FFF800000000000AE98
}
// CHECK-LABEL: define{{.*}} void @test_conditional_bzero
void test_conditional_bzero(void) {
  char dst[20];
  int _sz = 20, len = 20;
  (_sz ? __builtin_bzero(dst, len)
       : __builtin_bzero(dst, len));
  // CHECK: call void @llvm.memset
  // CHECK: call void @llvm.memset
}
// CHECK-LABEL: define{{.*}} void @test_conditional_bcopy
void test_conditional_bcopy(void) {
  char dst[20];
  char src[20];
  int _sz = 20, len = 20;
  (_sz ? __builtin_bcopy(src, dst, len)
       : __builtin_bcopy(src, dst, len));
  // CHECK: call void @llvm.memmove
  // CHECK: call void @llvm.memmove
}
// CHECK-LABEL: define{{.*}} void @test_float_builtins
void test_float_builtins(__fp16 *H, float F, double D, long double LD) {
  volatile int res;

  res = __builtin_isinf(*H);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 516)
  // CHECK: zext i1 [[TMP]] to i32
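  // (The i32 mask for llvm.is.fpclass is a bitset over FP classes: snan=1,
  // qnan=2, -inf=4, -normal=8, -subnormal=16, -zero=32, +zero=64,
  // +subnormal=128, +normal=256, +inf=512; 516 == 4|512 tests both infinities.)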
  res = __builtin_isinf(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 516)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_isinf(D);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 516)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_isinf(LD);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 {{.*}}, i32 516)
  // CHECK: zext i1 [[TMP]] to i32
  res = __builtin_isinf_sign(*H);
  // CHECK: %[[ABS:.*]] = call half @llvm.fabs.f16(half %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq half %[[ABS]], 0xH7C00
  // CHECK: %[[BITCAST:.*]] = bitcast half %[[ARG]] to i16
  // CHECK: %[[ISNEG:.*]] = icmp slt i16 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(F);
  // CHECK: %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
  // CHECK: %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(D);
  // CHECK: %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
  // CHECK: %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(LD);
  // CHECK: %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
  // CHECK: %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
  res = __builtin_isfinite(*H);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 504)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_isfinite(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 504)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_isfinite(D);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 504)
  // CHECK: zext i1 [[TMP]] to i32
  res = __builtin_isnormal(*H);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 264)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_isnormal(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 264)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_issubnormal(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 144)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_iszero(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 96)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_issignaling(F);
  // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 1)
  // CHECK: zext i1 [[TMP]] to i32

  res = __builtin_flt_rounds();
  // CHECK: call i32 @llvm.get.rounding(
}
// CHECK-LABEL: define{{.*}} void @test_float_builtin_ops
void test_float_builtin_ops(float F, double D, long double LD, int I) {
  volatile float resf;
  volatile double resd;
  volatile long double resld;
  volatile long int resli;
  volatile long long int reslli;
  resf = __builtin_fmodf(F,F);
  // CHECK: frem float

  resd = __builtin_fmod(D,D);
  // CHECK: frem double

  resld = __builtin_fmodl(LD,LD);
  // CHECK: frem x86_fp80
  resf = __builtin_fabsf(F);
  resd = __builtin_fabs(D);
  resld = __builtin_fabsl(LD);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80

  resf = __builtin_canonicalizef(F);
  resd = __builtin_canonicalize(D);
  resld = __builtin_canonicalizel(LD);
  // CHECK: call float @llvm.canonicalize.f32(float
  // CHECK: call double @llvm.canonicalize.f64(double
  // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
  resf = __builtin_fminf(F, F);
  // CHECK: call float @llvm.minnum.f32

  resd = __builtin_fmin(D, D);
  // CHECK: call double @llvm.minnum.f64

  resld = __builtin_fminl(LD, LD);
  // CHECK: call x86_fp80 @llvm.minnum.f80

  resf = __builtin_fmaxf(F, F);
  // CHECK: call float @llvm.maxnum.f32

  resd = __builtin_fmax(D, D);
  // CHECK: call double @llvm.maxnum.f64

  resld = __builtin_fmaxl(LD, LD);
  // CHECK: call x86_fp80 @llvm.maxnum.f80
  resf = __builtin_fminimum_numf(F, F);
  // CHECK: call float @llvm.minimumnum.f32

  resf = __builtin_fminimum_numf(I, I);
  // CHECK: sitofp i32 {{%[0-9]+}} to float
  // CHECK: sitofp i32 {{%[0-9]+}} to float
  // CHECK: call float @llvm.minimumnum.f32

  resf = __builtin_fminimum_numf(1.0, 2.0);
  // CHECK: store volatile float 1.000000e+00, ptr %resf

  resd = __builtin_fminimum_num(D, D);
  // CHECK: call double @llvm.minimumnum.f64

  resd = __builtin_fminimum_num(I, I);
  // CHECK: sitofp i32 {{%[0-9]+}} to double
  // CHECK: sitofp i32 {{%[0-9]+}} to double
  // CHECK: call double @llvm.minimumnum.f64

  resd = __builtin_fminimum_num(1.0, 2.0);
  // CHECK: store volatile double 1.000000e+00, ptr %resd

  // FIXME: __builtin_fminimum_numl is not supported well yet.
  resld = __builtin_fminimum_numl(1.0, 2.0);
  // CHECK: store volatile x86_fp80 0xK3FFF8000000000000000, ptr %resld, align 16

  resf = __builtin_fmaximum_numf(F, F);
  // CHECK: call float @llvm.maximumnum.f32

  resf = __builtin_fmaximum_numf(I, I);
  // CHECK: sitofp i32 {{%[0-9]+}} to float
  // CHECK: sitofp i32 {{%[0-9]+}} to float
  // CHECK: call float @llvm.maximumnum.f32

  resf = __builtin_fmaximum_numf(1.0, 2.0);
  // CHECK: store volatile float 2.000000e+00, ptr %resf

  resd = __builtin_fmaximum_num(D, D);
  // CHECK: call double @llvm.maximumnum.f64

  resd = __builtin_fmaximum_num(I, I);
  // CHECK: sitofp i32 {{%[0-9]+}} to double
  // CHECK: sitofp i32 {{%[0-9]+}} to double
  // CHECK: call double @llvm.maximumnum.f64

  resd = __builtin_fmaximum_num(1.0, 2.0);
  // CHECK: store volatile double 2.000000e+00, ptr %resd

  // FIXME: __builtin_fmaximum_numl is not supported well yet.
  resld = __builtin_fmaximum_numl(1.0, 2.0);
  // CHECK: store volatile x86_fp80 0xK40008000000000000000, ptr %resld, align 16
  resf = __builtin_fabsf(F);
  // CHECK: call float @llvm.fabs.f32

  resd = __builtin_fabs(D);
  // CHECK: call double @llvm.fabs.f64

  resld = __builtin_fabsl(LD);
  // CHECK: call x86_fp80 @llvm.fabs.f80

  resf = __builtin_copysignf(F, F);
  // CHECK: call float @llvm.copysign.f32

  resd = __builtin_copysign(D, D);
  // CHECK: call double @llvm.copysign.f64

  resld = __builtin_copysignl(LD, LD);
  // CHECK: call x86_fp80 @llvm.copysign.f80
  resf = __builtin_ceilf(F);
  // CHECK: call float @llvm.ceil.f32

  resd = __builtin_ceil(D);
  // CHECK: call double @llvm.ceil.f64

  resld = __builtin_ceill(LD);
  // CHECK: call x86_fp80 @llvm.ceil.f80

  resf = __builtin_floorf(F);
  // CHECK: call float @llvm.floor.f32

  resd = __builtin_floor(D);
  // CHECK: call double @llvm.floor.f64

  resld = __builtin_floorl(LD);
  // CHECK: call x86_fp80 @llvm.floor.f80

  resf = __builtin_sqrtf(F);
  // CHECK: call float @llvm.sqrt.f32(

  resd = __builtin_sqrt(D);
  // CHECK: call double @llvm.sqrt.f64(

  resld = __builtin_sqrtl(LD);
  // CHECK: call x86_fp80 @llvm.sqrt.f80

  resf = __builtin_truncf(F);
  // CHECK: call float @llvm.trunc.f32

  resd = __builtin_trunc(D);
  // CHECK: call double @llvm.trunc.f64

  resld = __builtin_truncl(LD);
  // CHECK: call x86_fp80 @llvm.trunc.f80

  resf = __builtin_rintf(F);
  // CHECK: call float @llvm.rint.f32

  resd = __builtin_rint(D);
  // CHECK: call double @llvm.rint.f64

  resld = __builtin_rintl(LD);
  // CHECK: call x86_fp80 @llvm.rint.f80

  resf = __builtin_nearbyintf(F);
  // CHECK: call float @llvm.nearbyint.f32

  resd = __builtin_nearbyint(D);
  // CHECK: call double @llvm.nearbyint.f64

  resld = __builtin_nearbyintl(LD);
  // CHECK: call x86_fp80 @llvm.nearbyint.f80

  resf = __builtin_roundf(F);
  // CHECK: call float @llvm.round.f32

  resd = __builtin_round(D);
  // CHECK: call double @llvm.round.f64

  resld = __builtin_roundl(LD);
  // CHECK: call x86_fp80 @llvm.round.f80

  resf = __builtin_roundevenf(F);
  // CHECK: call float @llvm.roundeven.f32

  resd = __builtin_roundeven(D);
  // CHECK: call double @llvm.roundeven.f64

  resld = __builtin_roundevenl(LD);
  // CHECK: call x86_fp80 @llvm.roundeven.f80

  resli = __builtin_lroundf(F);
  // CHECK: call i64 @llvm.lround.i64.f32

  resli = __builtin_lround(D);
  // CHECK: call i64 @llvm.lround.i64.f64

  resli = __builtin_lroundl(LD);
  // CHECK: call i64 @llvm.lround.i64.f80

  resli = __builtin_lrintf(F);
  // CHECK: call i64 @llvm.lrint.i64.f32

  resli = __builtin_lrint(D);
  // CHECK: call i64 @llvm.lrint.i64.f64

  resli = __builtin_lrintl(LD);
  // CHECK: call i64 @llvm.lrint.i64.f80
}
// __builtin_longjmp isn't supported on all platforms, so only test it on X86.
#ifdef __x86_64__

// CHECK-LABEL: define{{.*}} void @test_builtin_longjmp(ptr{{.*}}
void test_builtin_longjmp(void **buffer) {
  // CHECK: [[LOAD:%[a-z0-9]+]] = load ptr, ptr
  // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(ptr [[LOAD]])
  __builtin_longjmp(buffer, 1);
  // CHECK-NEXT: unreachable
}

#endif
// CHECK-LABEL: define{{.*}} void @test_memory_builtins
void test_memory_builtins(int n) {
  // CHECK: call ptr @malloc
  void * p = __builtin_malloc(n);
  // CHECK: call void @free
  __builtin_free(p);
  // CHECK: call ptr @calloc
  p = __builtin_calloc(1, n);
  // CHECK: call ptr @realloc
  p = __builtin_realloc(p, n);
  // CHECK: call void @free
  __builtin_free(p);
}
// CHECK-LABEL: define{{.*}} i64 @test_builtin_readcyclecounter
long long test_builtin_readcyclecounter(void) {
  // CHECK: call i64 @llvm.readcyclecounter()
  return __builtin_readcyclecounter();
}

// CHECK-LABEL: define{{.*}} i64 @test_builtin_readsteadycounter
long long test_builtin_readsteadycounter(void) {
  // CHECK: call i64 @llvm.readsteadycounter()
  return __builtin_readsteadycounter();
}
/// __builtin_launder should be a NOP in C since there are no vtables.
// CHECK-LABEL: define{{.*}} void @test_builtin_launder
void test_builtin_launder(int *p) {
  // CHECK: [[TMP:%.*]] = load ptr,
  // CHECK-NOT: @llvm.launder
  // CHECK: store ptr [[TMP]],
  int *d = __builtin_launder(p);
}

// __warn_memset_zero_len should be a NOP, see https://sourceware.org/bugzilla/show_bug.cgi?id=25399
// CHECK-LABEL: define{{.*}} void @test___warn_memset_zero_len
void test___warn_memset_zero_len(void) {
  // CHECK-NOT: @__warn_memset_zero_len
  __warn_memset_zero_len();
}
// Behavior of __builtin_os_log differs between platforms, so only test it on X86.
#ifdef __x86_64__

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log
// CHECK: (ptr noundef %[[BUF:.*]], i32 noundef %[[I:.*]], ptr noundef %[[DATA:.*]])
void test_builtin_os_log(void *buf, int i, const char *data) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[I_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store i32 %[[I]], ptr %[[I_ADDR]], align 4
  // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8

  // CHECK: store volatile i32 34, ptr %[[LEN]]
  len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);
  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]]
  // CHECK: %[[V2:.*]] = load i32, ptr %[[I_ADDR]]
  // CHECK: %[[V3:.*]] = load ptr, ptr %[[DATA_ADDR]]
  // CHECK: %[[V4:.*]] = ptrtoint ptr %[[V3]] to i64
  // CHECK: %[[V5:.*]] = load ptr, ptr %[[DATA_ADDR]]
  // CHECK: %[[V6:.*]] = ptrtoint ptr %[[V5]] to i64
  // CHECK: call void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49(ptr noundef %[[V1]], i32 noundef %[[V2]], i64 noundef %[[V4]], i32 noundef 16, i64 noundef %[[V6]])
  __builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);
  // Privacy annotations aren't recognized when they are preceded or followed
  // by non-whitespace characters.

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{xyz public}s", data);

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ public xyz}s", data);

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ public1}s", data);

  // Privacy annotations do not have to be in the first comma-delimited string.

  // CHECK: call void @__os_log_helper_1_2_1_8_34(
  __builtin_os_log_format(buf, "%{ xyz, public }s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_33(
  __builtin_os_log_format(buf, "%{ xyz, private }s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_37(
  __builtin_os_log_format(buf, "%{ xyz, sensitive }s", "abc");

  // The strictest privacy annotation in the string wins.

  // CHECK: call void @__os_log_helper_1_3_1_8_33(
  __builtin_os_log_format(buf, "%{ private, public, private, public}s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_37(
  __builtin_os_log_format(buf, "%{ private, sensitive, private, public}s", "abc");

  // CHECK: store volatile i32 22, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%{mask.xyz}s", "abc");

  // CHECK: call void @__os_log_helper_1_2_2_8_112_8_34(ptr noundef {{.*}}, i64 noundef 8026488
  __builtin_os_log_format(buf, "%{mask.xyz, public}s", "abc");

  // CHECK: call void @__os_log_helper_1_3_2_8_112_4_1(ptr noundef {{.*}}, i64 noundef 8026488
  __builtin_os_log_format(buf, "%{ mask.xyz, private }d", 11);

  // A malformed mask type is silently ignored.
  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ mask. xyz }s", "abc");

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ mask.xy z }s", "abc");
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]], i32 noundef %[[ARG2:.*]], i64 noundef %[[ARG3:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG2_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG3_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: store i64 %[[ARG1]], ptr %[[ARG1_ADDR]], align 8
// CHECK: store i32 %[[ARG2]], ptr %[[ARG2_ADDR]], align 4
// CHECK: store i64 %[[ARG3]], ptr %[[ARG3_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 3, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 4, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 8
// CHECK: store i8 34, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 9
// CHECK: store i8 8, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 10
// CHECK: %[[V1:.*]] = load i64, ptr %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], ptr %[[ARGDATA3]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, ptr %[[BUF]], i64 18
// CHECK: store i8 17, ptr %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, ptr %[[BUF]], i64 19
// CHECK: store i8 4, ptr %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, ptr %[[BUF]], i64 20
// CHECK: %[[V2:.*]] = load i32, ptr %[[ARG2_ADDR]], align 4
// CHECK: store i32 %[[V2]], ptr %[[ARGDATA7]], align 1
// CHECK: %[[ARGDESCRIPTOR9:.*]] = getelementptr i8, ptr %[[BUF]], i64 24
// CHECK: store i8 49, ptr %[[ARGDESCRIPTOR9]], align 1
// CHECK: %[[ARGSIZE10:.*]] = getelementptr i8, ptr %[[BUF]], i64 25
// CHECK: store i8 8, ptr %[[ARGSIZE10]], align 1
// CHECK: %[[ARGDATA11:.*]] = getelementptr i8, ptr %[[BUF]], i64 26
// CHECK: %[[V3:.*]] = load i64, ptr %[[ARG3_ADDR]], align 8
// CHECK: store i64 %[[V3]], ptr %[[ARGDATA11]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_wide
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA:.*]], ptr noundef %[[STR:.*]])
typedef int wchar_t;
void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[STR_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8
  // CHECK: store ptr %[[STR]], ptr %[[STR_ADDR]], align 8

  // CHECK: store volatile i32 12, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%S", str);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load ptr, ptr %[[STR_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint ptr %[[V2]] to i64
  // CHECK: call void @__os_log_helper_1_2_1_8_80(ptr noundef %[[V1]], i64 noundef %[[V3]])
  __builtin_os_log_format(buf, "%S", str);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_8_80
// CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], ptr %[[ARG0_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 80, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 8, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i64, ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_precision_width
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA:.*]], i32 noundef %[[PRECISION:.*]], i32 noundef %[[WIDTH:.*]])
void test_builtin_os_log_precision_width(void *buf, const char *data,
                                         int precision, int width) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[PRECISION_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[WIDTH_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8
  // CHECK: store i32 %[[PRECISION]], ptr %[[PRECISION_ADDR]], align 4
  // CHECK: store i32 %[[WIDTH]], ptr %[[WIDTH_ADDR]], align 4

  // CHECK: store volatile i32 24, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, ptr %[[PRECISION_ADDR]], align 4
  // CHECK: %[[V3:.*]] = load i32, ptr %[[WIDTH_ADDR]], align 4
  // CHECK: %[[V4:.*]] = load ptr, ptr %[[DATA_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint ptr %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_3_4_0_4_16_8_32(ptr noundef %[[V1]], i32 noundef %[[V2]], i32 noundef %[[V3]], i64 noundef %[[V5]])
  __builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_3_4_0_4_16_8_32
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i32 noundef %[[ARG1:.*]], i64 noundef %[[ARG2:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG2_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[ARG1]], ptr %[[ARG1_ADDR]], align 4
// CHECK: store i64 %[[ARG2]], ptr %[[ARG2_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 3, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 8
// CHECK: store i8 16, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 9
// CHECK: store i8 4, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 10
// CHECK: %[[V1:.*]] = load i32, ptr %[[ARG1_ADDR]], align 4
// CHECK: store i32 %[[V1]], ptr %[[ARGDATA3]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, ptr %[[BUF]], i64 14
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, ptr %[[BUF]], i64 15
// CHECK: store i8 8, ptr %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, ptr %[[BUF]], i64 16
// CHECK: %[[V2:.*]] = load i64, ptr %[[ARG2_ADDR]], align 8
// CHECK: store i64 %[[V2]], ptr %[[ARGDATA7]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_invalid
// CHECK: (ptr noundef %[[BUF:.*]], i32 noundef %[[DATA:.*]])
void test_builtin_os_log_invalid(void *buf, int data) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store i32 %[[DATA]], ptr %[[DATA_ADDR]], align 4

  // CHECK: store volatile i32 8, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, ptr %[[DATA_ADDR]], align 4
  // CHECK: call void @__os_log_helper_1_0_1_4_0(ptr noundef %[[V1]], i32 noundef %[[V2]])
  __builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_4_0
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 0, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_percent
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA1:.*]], ptr noundef %[[DATA2:.*]])
// Check that %%, which does not consume any argument, is correctly handled.
void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA1_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA2_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA1]], ptr %[[DATA1_ADDR]], align 8
  // CHECK: store ptr %[[DATA2]], ptr %[[DATA2_ADDR]], align 8
  // CHECK: store volatile i32 22, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load ptr, ptr %[[DATA1_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint ptr %[[V2]] to i64
  // CHECK: %[[V4:.*]] = load ptr, ptr %[[DATA2_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint ptr %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_2_8_32_8_32(ptr noundef %[[V1]], i64 noundef %[[V3]], i64 noundef %[[V5]])
  __builtin_os_log_format(buf, "%s %% %s", data1, data2);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_2_8_32_8_32
// CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[ARG1]], ptr %[[ARG1_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 2, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 8, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i64, ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 12
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 13
// CHECK: store i8 8, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 14
// CHECK: %[[V1:.*]] = load i64, ptr %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], ptr %[[ARGDATA3]], align 1
// Check that the following two functions call the same helper function.

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper0
// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
void test_builtin_os_log_merge_helper0(void *buf, int i, double d) {
  __builtin_os_log_format(buf, "%d %f", i, d);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_2_4_0_8_0(

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper1
// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
void test_builtin_os_log_merge_helper1(void *buf, unsigned u, long long ll) {
  __builtin_os_log_format(buf, "%u %lld", u, ll);
}
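// Both wind up in @__os_log_helper_1_0_2_4_0_8_0 because %d/%u are 4-byte
// scalars and %f/%lld are 8-byte scalars with the same descriptor, so the
// serialized buffer layouts are identical.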
// Check that this function doesn't write past the end of array 'buf'.

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_errno
void test_builtin_os_log_errno(void) {
  // CHECK-NOT: @stacksave
  // CHECK: %[[BUF:.*]] = alloca [4 x i8], align 1
  // CHECK: %[[DECAY:.*]] = getelementptr inbounds [4 x i8], ptr %[[BUF]], i64 0, i64 0
  // CHECK: call void @__os_log_helper_1_2_1_0_96(ptr noundef %[[DECAY]])
  // CHECK-NOT: @stackrestore

  char buf[__builtin_os_log_format_buffer_size("%m")];
  __builtin_os_log_format(buf, "%m");
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_0_96
// CHECK: (ptr noundef %[[BUFFER:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 96, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 0, ptr %[[ARGSIZE]], align 1
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_long_double
// CHECK: (ptr noundef %[[BUF:.*]], x86_fp80 noundef %[[LD:.*]])
void test_builtin_os_log_long_double(void *buf, long double ld) {
  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LD_ADDR:.*]] = alloca x86_fp80, align 16
  // CHECK: %[[COERCE:.*]] = alloca i128, align 16
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store x86_fp80 %[[LD]], ptr %[[LD_ADDR]], align 16
  // CHECK: %[[V0:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V1:.*]] = load x86_fp80, ptr %[[LD_ADDR]], align 16
  // CHECK: %[[V2:.*]] = bitcast x86_fp80 %[[V1]] to i80
  // CHECK: %[[V3:.*]] = zext i80 %[[V2]] to i128
  // CHECK: store i128 %[[V3]], ptr %[[COERCE]], align 16
  // CHECK: %[[V5:.*]] = getelementptr inbounds nuw { i64, i64 }, ptr %[[COERCE]], i32 0, i32 0
  // CHECK: %[[V6:.*]] = load i64, ptr %[[V5]], align 16
  // CHECK: %[[V7:.*]] = getelementptr inbounds nuw { i64, i64 }, ptr %[[COERCE]], i32 0, i32 1
  // CHECK: %[[V8:.*]] = load i64, ptr %[[V7]], align 8
  // CHECK: call void @__os_log_helper_1_0_1_16_0(ptr noundef %[[V0]], i64 noundef %[[V6]], i64 noundef %[[V8]])
  __builtin_os_log_format(buf, "%Lf", ld);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_16_0
// CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0_COERCE0:.*]], i64 noundef %[[ARG0_COERCE1:.*]])

// CHECK: %[[ARG0:.*]] = alloca i128, align 16
// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i128, align 16
// CHECK: %[[V1:.*]] = getelementptr inbounds nuw { i64, i64 }, ptr %[[ARG0]], i32 0, i32 0
// CHECK: store i64 %[[ARG0_COERCE0]], ptr %[[V1]], align 16
// CHECK: %[[V2:.*]] = getelementptr inbounds nuw { i64, i64 }, ptr %[[ARG0]], i32 0, i32 1
// CHECK: store i64 %[[ARG0_COERCE1]], ptr %[[V2]], align 8
// CHECK: %[[ARG01:.*]] = load i128, ptr %[[ARG0]], align 16
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i128 %[[ARG01]], ptr %[[ARG0_ADDR]], align 16
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 0, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 16, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V3:.*]] = load i128, ptr %[[ARG0_ADDR]], align 16
// CHECK: store i128 %[[V3]], ptr %[[ARGDATA]], align 1
#endif

// CHECK-LABEL: define{{.*}} void @test_builtin_popcountg
void test_builtin_popcountg(unsigned char uc, unsigned short us,
                            unsigned int ui, unsigned long ul,
                            unsigned long long ull, unsigned __int128 ui128,
                            unsigned _BitInt(128) ubi128) {
  volatile int pop;

  pop = __builtin_popcountg(uc);
  // CHECK: %1 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %2 = call i8 @llvm.ctpop.i8(i8 %1)
  // CHECK-NEXT: %cast = zext i8 %2 to i32
  // CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
  pop = __builtin_popcountg(us);
  // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %4 = call i16 @llvm.ctpop.i16(i16 %3)
  // CHECK-NEXT: %cast1 = zext i16 %4 to i32
  // CHECK-NEXT: store volatile i32 %cast1, ptr %pop, align 4
  pop = __builtin_popcountg(ui);
  // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
  // CHECK-NEXT: %6 = call i32 @llvm.ctpop.i32(i32 %5)
  // CHECK-NEXT: store volatile i32 %6, ptr %pop, align 4
  pop = __builtin_popcountg(ul);
  // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
  // CHECK-NEXT: %8 = call i64 @llvm.ctpop.i64(i64 %7)
  // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
  // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
  pop = __builtin_popcountg(ull);
  // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
  // CHECK-NEXT: %10 = call i64 @llvm.ctpop.i64(i64 %9)
  // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
  // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
  pop = __builtin_popcountg(ui128);
  // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
  // CHECK-NEXT: %12 = call i128 @llvm.ctpop.i128(i128 %11)
  // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
  // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
  pop = __builtin_popcountg(ubi128);
  // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
  // CHECK-NEXT: %14 = call i128 @llvm.ctpop.i128(i128 %13)
  // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
  // CHECK-NEXT: store volatile i32 %cast5, ptr %pop, align 4
  // CHECK-NEXT: ret void
}
// CHECK-LABEL: define{{.*}} void @test_builtin_clzg
void test_builtin_clzg(unsigned char uc, unsigned short us, unsigned int ui,
                       unsigned long ul, unsigned long long ull,
                       unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
                       signed char sc, short s, int i) {
  volatile int lz;

  lz = __builtin_clzg(uc);
  // CHECK: %1 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %2 = call i8 @llvm.ctlz.i8(i8 %1, i1 true)
  // CHECK-NEXT: %cast = zext i8 %2 to i32
  // CHECK-NEXT: store volatile i32 %cast, ptr %lz, align 4
  lz = __builtin_clzg(us);
  // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %4 = call i16 @llvm.ctlz.i16(i16 %3, i1 true)
  // CHECK-NEXT: %cast1 = zext i16 %4 to i32
  // CHECK-NEXT: store volatile i32 %cast1, ptr %lz, align 4
  lz = __builtin_clzg(ui);
  // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
  // CHECK-NEXT: %6 = call i32 @llvm.ctlz.i32(i32 %5, i1 true)
  // CHECK-NEXT: store volatile i32 %6, ptr %lz, align 4
  lz = __builtin_clzg(ul);
  // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
  // CHECK-NEXT: %8 = call i64 @llvm.ctlz.i64(i64 %7, i1 true)
  // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
  // CHECK-NEXT: store volatile i32 %cast2, ptr %lz, align 4
  lz = __builtin_clzg(ull);
  // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
  // CHECK-NEXT: %10 = call i64 @llvm.ctlz.i64(i64 %9, i1 true)
  // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
  // CHECK-NEXT: store volatile i32 %cast3, ptr %lz, align 4
  lz = __builtin_clzg(ui128);
  // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
  // CHECK-NEXT: %12 = call i128 @llvm.ctlz.i128(i128 %11, i1 true)
  // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
  // CHECK-NEXT: store volatile i32 %cast4, ptr %lz, align 4
  lz = __builtin_clzg(ubi128);
  // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
  // CHECK-NEXT: %14 = call i128 @llvm.ctlz.i128(i128 %13, i1 true)
  // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
  // CHECK-NEXT: store volatile i32 %cast5, ptr %lz, align 4
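  // With a second argument, __builtin_clzg returns that value (converted to
  // int) when the first operand is zero, instead of producing an undefined
  // result; hence the explicit zero test and select in the IR below.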
  lz = __builtin_clzg(uc, sc);
  // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %16 = call i8 @llvm.ctlz.i8(i8 %15, i1 true)
  // CHECK-NEXT: %cast6 = zext i8 %16 to i32
  // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
  // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
  // CHECK-NEXT: %conv = sext i8 %17 to i32
  // CHECK-NEXT: %clzg = select i1 %iszero, i32 %conv, i32 %cast6
  // CHECK-NEXT: store volatile i32 %clzg, ptr %lz, align 4
  lz = __builtin_clzg(us, uc);
  // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %19 = call i16 @llvm.ctlz.i16(i16 %18, i1 true)
  // CHECK-NEXT: %cast7 = zext i16 %19 to i32
  // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
  // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %conv9 = zext i8 %20 to i32
  // CHECK-NEXT: %clzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
  // CHECK-NEXT: store volatile i32 %clzg10, ptr %lz, align 4
  lz = __builtin_clzg(ui, s);
  // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
  // CHECK-NEXT: %22 = call i32 @llvm.ctlz.i32(i32 %21, i1 true)
  // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
  // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
  // CHECK-NEXT: %conv12 = sext i16 %23 to i32
  // CHECK-NEXT: %clzg13 = select i1 %iszero11, i32 %conv12, i32 %22
  // CHECK-NEXT: store volatile i32 %clzg13, ptr %lz, align 4
  lz = __builtin_clzg(ul, us);
  // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
  // CHECK-NEXT: %25 = call i64 @llvm.ctlz.i64(i64 %24, i1 true)
  // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
  // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
  // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %conv16 = zext i16 %26 to i32
  // CHECK-NEXT: %clzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
  // CHECK-NEXT: store volatile i32 %clzg17, ptr %lz, align 4
  lz = __builtin_clzg(ull, i);
  // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
  // CHECK-NEXT: %28 = call i64 @llvm.ctlz.i64(i64 %27, i1 true)
  // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
  // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
  // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %clzg20 = select i1 %iszero19, i32 %29, i32 %cast18
  // CHECK-NEXT: store volatile i32 %clzg20, ptr %lz, align 4
  lz = __builtin_clzg(ui128, i);
  // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
  // CHECK-NEXT: %31 = call i128 @llvm.ctlz.i128(i128 %30, i1 true)
  // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
  // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
  // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %clzg23 = select i1 %iszero22, i32 %32, i32 %cast21
  // CHECK-NEXT: store volatile i32 %clzg23, ptr %lz, align 4
  lz = __builtin_clzg(ubi128, i);
  // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
  // CHECK-NEXT: %34 = call i128 @llvm.ctlz.i128(i128 %33, i1 true)
  // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
  // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
  // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %clzg26 = select i1 %iszero25, i32 %35, i32 %cast24
  // CHECK-NEXT: store volatile i32 %clzg26, ptr %lz, align 4
  // CHECK-NEXT: ret void
}
// CHECK-LABEL: define{{.*}} void @test_builtin_ctzg
void test_builtin_ctzg(unsigned char uc, unsigned short us, unsigned int ui,
                       unsigned long ul, unsigned long long ull,
                       unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
                       signed char sc, short s, int i) {
  volatile int tz;

  tz = __builtin_ctzg(uc);
  // CHECK: %1 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %2 = call i8 @llvm.cttz.i8(i8 %1, i1 true)
  // CHECK-NEXT: %cast = zext i8 %2 to i32
  // CHECK-NEXT: store volatile i32 %cast, ptr %tz, align 4
  tz = __builtin_ctzg(us);
  // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %4 = call i16 @llvm.cttz.i16(i16 %3, i1 true)
  // CHECK-NEXT: %cast1 = zext i16 %4 to i32
  // CHECK-NEXT: store volatile i32 %cast1, ptr %tz, align 4
  tz = __builtin_ctzg(ui);
  // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
  // CHECK-NEXT: %6 = call i32 @llvm.cttz.i32(i32 %5, i1 true)
  // CHECK-NEXT: store volatile i32 %6, ptr %tz, align 4
  tz = __builtin_ctzg(ul);
  // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
  // CHECK-NEXT: %8 = call i64 @llvm.cttz.i64(i64 %7, i1 true)
  // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
  // CHECK-NEXT: store volatile i32 %cast2, ptr %tz, align 4
  tz = __builtin_ctzg(ull);
  // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
  // CHECK-NEXT: %10 = call i64 @llvm.cttz.i64(i64 %9, i1 true)
  // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
  // CHECK-NEXT: store volatile i32 %cast3, ptr %tz, align 4
  tz = __builtin_ctzg(ui128);
  // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
  // CHECK-NEXT: %12 = call i128 @llvm.cttz.i128(i128 %11, i1 true)
  // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
  // CHECK-NEXT: store volatile i32 %cast4, ptr %tz, align 4
  tz = __builtin_ctzg(ubi128);
  // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
  // CHECK-NEXT: %14 = call i128 @llvm.cttz.i128(i128 %13, i1 true)
  // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
  // CHECK-NEXT: store volatile i32 %cast5, ptr %tz, align 4
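  // As with __builtin_clzg, the two-argument forms below return the second
  // argument when the first operand is zero.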
  tz = __builtin_ctzg(uc, sc);
  // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %16 = call i8 @llvm.cttz.i8(i8 %15, i1 true)
  // CHECK-NEXT: %cast6 = zext i8 %16 to i32
  // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
  // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
  // CHECK-NEXT: %conv = sext i8 %17 to i32
  // CHECK-NEXT: %ctzg = select i1 %iszero, i32 %conv, i32 %cast6
  // CHECK-NEXT: store volatile i32 %ctzg, ptr %tz, align 4
  tz = __builtin_ctzg(us, uc);
  // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %19 = call i16 @llvm.cttz.i16(i16 %18, i1 true)
  // CHECK-NEXT: %cast7 = zext i16 %19 to i32
  // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
  // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
  // CHECK-NEXT: %conv9 = zext i8 %20 to i32
  // CHECK-NEXT: %ctzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
  // CHECK-NEXT: store volatile i32 %ctzg10, ptr %tz, align 4
  tz = __builtin_ctzg(ui, s);
  // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
  // CHECK-NEXT: %22 = call i32 @llvm.cttz.i32(i32 %21, i1 true)
  // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
  // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
  // CHECK-NEXT: %conv12 = sext i16 %23 to i32
  // CHECK-NEXT: %ctzg13 = select i1 %iszero11, i32 %conv12, i32 %22
  // CHECK-NEXT: store volatile i32 %ctzg13, ptr %tz, align 4
  tz = __builtin_ctzg(ul, us);
  // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
  // CHECK-NEXT: %25 = call i64 @llvm.cttz.i64(i64 %24, i1 true)
  // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
  // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
  // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
  // CHECK-NEXT: %conv16 = zext i16 %26 to i32
  // CHECK-NEXT: %ctzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
  // CHECK-NEXT: store volatile i32 %ctzg17, ptr %tz, align 4
  tz = __builtin_ctzg(ull, i);
  // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
  // CHECK-NEXT: %28 = call i64 @llvm.cttz.i64(i64 %27, i1 true)
  // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
  // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
  // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %ctzg20 = select i1 %iszero19, i32 %29, i32 %cast18
  // CHECK-NEXT: store volatile i32 %ctzg20, ptr %tz, align 4
  tz = __builtin_ctzg(ui128, i);
  // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
  // CHECK-NEXT: %31 = call i128 @llvm.cttz.i128(i128 %30, i1 true)
  // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
  // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
  // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %ctzg23 = select i1 %iszero22, i32 %32, i32 %cast21
  // CHECK-NEXT: store volatile i32 %ctzg23, ptr %tz, align 4
  tz = __builtin_ctzg(ubi128, i);
  // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
  // CHECK-NEXT: %34 = call i128 @llvm.cttz.i128(i128 %33, i1 true)
  // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
  // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
  // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
  // CHECK-NEXT: %ctzg26 = select i1 %iszero25, i32 %35, i32 %cast24
  // CHECK-NEXT: store volatile i32 %ctzg26, ptr %tz, align 4
  // CHECK-NEXT: ret void
}