// Run DCE after a LoopFlatten test to reduce spurious output [nfc]
// [llvm-project.git] / clang / test / CodeGen / builtins.c
// blob ce1182b724dcc21af36a7a211b7e91e8cf04e1b2
// RUN: %clang_cc1 -emit-llvm -o %t %s
// RUN: not grep __builtin %t
// RUN: %clang_cc1 -emit-llvm -triple x86_64-darwin-apple -o - %s | FileCheck %s
int printf(const char *, ...);

// Print an int result under a label; target of the P()/V() macros in main.
void p(char *str, int x) {
  printf("%s: %d\n", str, x);
}
// Print a double result under a label; target of the Q() macros in main.
void q(char *str, double x) {
  printf("%s: %f\n", str, x);
}
// Print a pointer result under a label; target of the R() macros in main.
void r(char *str, void *ptr) {
  printf("%s: %p\n", str, ptr);
}
int random(void);
int finite(double);

// Exercise a broad sweep of __builtin_* functions. The second RUN line
// ("not grep __builtin") verifies that every builtin used here is lowered
// by CodeGen and none survives as a call by its __builtin_ name.
int main(void) {
  int N = random();
#define P(n,args) p(#n #args, __builtin_##n args)
#define Q(n,args) q(#n #args, __builtin_##n args)
#define R(n,args) r(#n #args, __builtin_##n args)
#define V(n,args) p(#n #args, (__builtin_##n args, 0))
  P(types_compatible_p, (int, float));
  P(choose_expr, (0, 10, 20));
  P(constant_p, (sizeof(10)));
  P(expect, (N == 12, 0));
  V(prefetch, (&N));
  V(prefetch, (&N, 1));
  V(prefetch, (&N, 1, 0));

  // Numeric Constants

  Q(huge_val, ());
  Q(huge_valf, ());
  Q(huge_vall, ());
  Q(inf, ());
  Q(inff, ());
  Q(infl, ());

  P(fpclassify, (0, 1, 2, 3, 4, 1.0));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0l));

  Q(nan, (""));
  Q(nanf, (""));
  Q(nanl, (""));
  Q(nans, (""));
  Q(nan, ("10"));
  Q(nanf, ("10"));
  Q(nanl, ("10"));
  Q(nans, ("10"));

  P(isgreater, (1., 2.));
  P(isgreaterequal, (1., 2.));
  P(isless, (1., 2.));
  P(islessequal, (1., 2.));
  P(islessgreater, (1., 2.));
  P(isunordered, (1., 2.));

  P(isinf, (1.));
  P(isinf_sign, (1.));
  P(isnan, (1.));
  P(isfinite, (1.));
  P(iszero, (1.));
  P(issubnormal, (1.));
  P(issignaling, (1.));
  P(isfpclass, (1., 1));

  // Bitwise & Numeric Functions

  P(abs, (N));

  P(clz, (N));
  P(clzl, (N));
  P(clzll, (N));
  P(ctz, (N));
  P(ctzl, (N));
  P(ctzll, (N));
  P(ffs, (N));
  P(ffsl, (N));
  P(ffsll, (N));
  P(parity, (N));
  P(parityl, (N));
  P(parityll, (N));
  P(popcount, (N));
  P(popcountl, (N));
  P(popcountll, (N));
  Q(powi, (1.2f, N));
  Q(powif, (1.2f, N));
  Q(powil, (1.2f, N));

  // Lib functions
  int a, b, n = random(); // Avoid optimizing out.
  char s0[10], s1[] = "Hello";
  V(strcat, (s0, s1));
  V(strcmp, (s0, s1));
  V(strdup, (s0));
  V(strncat, (s0, s1, n));
  V(strndup, (s0, n));
  V(strchr, (s0, s1[0]));
  V(strrchr, (s0, s1[0]));
  V(strcpy, (s0, s1));
  V(strncpy, (s0, s1, n));
  V(sprintf, (s0, "%s", s1));
  V(snprintf, (s0, n, "%s", s1));

  // Object size checking
  V(__memset_chk, (s0, 0, sizeof s0, n));
  V(__memcpy_chk, (s0, s1, sizeof s0, n));
  V(__memmove_chk, (s0, s1, sizeof s0, n));
  V(__mempcpy_chk, (s0, s1, sizeof s0, n));
  V(__strncpy_chk, (s0, s1, sizeof s0, n));
  V(__strcpy_chk, (s0, s1, n));
  s0[0] = 0;
  V(__strcat_chk, (s0, s1, n));
  P(object_size, (s0, 0));
  P(object_size, (s0, 1));
  P(object_size, (s0, 2));
  P(object_size, (s0, 3));

  // Whatever

  P(bswap16, (N));
  P(bswap32, (N));
  P(bswap64, (N));

  // CHECK: @llvm.bitreverse.i8
  // CHECK: @llvm.bitreverse.i16
  // CHECK: @llvm.bitreverse.i32
  // CHECK: @llvm.bitreverse.i64
  P(bitreverse8, (N));
  P(bitreverse16, (N));
  P(bitreverse32, (N));
  P(bitreverse64, (N));

  // FIXME
  // V(clear_cache, (&N, &N+1));
  V(trap, ());
  R(extract_return_addr, (&N));
  P(signbit, (1.0));

  R(launder, (&N));

  return 0;
}
// Only checks that the strcat builtin call is emitted; never executed
// (the null arguments would be UB at runtime).
void foo(void) {
  __builtin_strcat(0, 0);
}
// CHECK-LABEL: define{{.*}} void @bar(
// Checks the exact IR constants produced for huge_val/nan/nans builtins.
void bar(void) {
  float f;
  double d;
  long double ld;

  // LLVM's hex representation of float constants is really unfortunate;
  // basically it does a float-to-double "conversion" and then prints the
  // hex form of that. That gives us weird artifacts like exponents
  // that aren't numerically similar to the original exponent and
  // significand bit-patterns that are offset by three bits (because
  // the exponent was expanded from 8 bits to 11).

  // 0xAE98 == 1010111010011000
  // 0x15D3 == 1010111010011

  f = __builtin_huge_valf();     // CHECK: float 0x7FF0000000000000
  d = __builtin_huge_val();      // CHECK: double 0x7FF0000000000000
  ld = __builtin_huge_vall();    // CHECK: x86_fp80 0xK7FFF8000000000000000
  f = __builtin_nanf("");        // CHECK: float 0x7FF8000000000000
  d = __builtin_nan("");         // CHECK: double 0x7FF8000000000000
  ld = __builtin_nanl("");       // CHECK: x86_fp80 0xK7FFFC000000000000000
  f = __builtin_nanf("0xAE98");  // CHECK: float 0x7FF815D300000000
  d = __builtin_nan("0xAE98");   // CHECK: double 0x7FF800000000AE98
  ld = __builtin_nanl("0xAE98"); // CHECK: x86_fp80 0xK7FFFC00000000000AE98
  f = __builtin_nansf("");       // CHECK: float 0x7FF4000000000000
  d = __builtin_nans("");        // CHECK: double 0x7FF4000000000000
  ld = __builtin_nansl("");      // CHECK: x86_fp80 0xK7FFFA000000000000000
  f = __builtin_nansf("0xAE98"); // CHECK: float 0x7FF015D300000000
  d = __builtin_nans("0xAE98");  // CHECK: double 0x7FF000000000AE98
  ld = __builtin_nansl("0xAE98");// CHECK: x86_fp80 0xK7FFF800000000000AE98
}
// CHECK: }
// CHECK-LABEL: define{{.*}} void @test_conditional_bzero
// Returning a void conditional expression must not introduce a phi; both
// bzero arms lower directly to llvm.memset.
void test_conditional_bzero(void) {
  char dst[20];
  int _sz = 20, len = 20;
  return (_sz
          ? ((_sz >= len)
             ? __builtin_bzero(dst, len)
             : foo())
          : __builtin_bzero(dst, len));
  // CHECK: call void @llvm.memset
  // CHECK: call void @llvm.memset
  // CHECK-NOT: phi
}
205 // CHECK-LABEL: define{{.*}} void @test_float_builtins
206 void test_float_builtins(__fp16 *H, float F, double D, long double LD) {
207 volatile int res;
208 res = __builtin_isinf(*H);
209 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 516)
210 // CHECK: zext i1 [[TMP]] to i32
212 res = __builtin_isinf(F);
213 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 516)
214 // CHECK: zext i1 [[TMP]] to i32
216 res = __builtin_isinf(D);
217 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 516)
218 // CHECK: zext i1 [[TMP]] to i32
220 res = __builtin_isinf(LD);
221 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 {{.*}}, i32 516)
222 // CHECK: zext i1 [[TMP]] to i32
224 res = __builtin_isinf_sign(*H);
225 // CHECK: %[[ABS:.*]] = call half @llvm.fabs.f16(half %[[ARG:.*]])
226 // CHECK: %[[ISINF:.*]] = fcmp oeq half %[[ABS]], 0xH7C00
227 // CHECK: %[[BITCAST:.*]] = bitcast half %[[ARG]] to i16
228 // CHECK: %[[ISNEG:.*]] = icmp slt i16 %[[BITCAST]], 0
229 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
230 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
232 res = __builtin_isinf_sign(F);
233 // CHECK: %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
234 // CHECK: %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
235 // CHECK: %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
236 // CHECK: %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
237 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
238 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
240 res = __builtin_isinf_sign(D);
241 // CHECK: %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
242 // CHECK: %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
243 // CHECK: %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
244 // CHECK: %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
245 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
246 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
248 res = __builtin_isinf_sign(LD);
249 // CHECK: %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
250 // CHECK: %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
251 // CHECK: %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
252 // CHECK: %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
253 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
254 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
256 res = __builtin_isfinite(*H);
257 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 504)
258 // CHECK: zext i1 [[TMP]] to i32
260 res = __builtin_isfinite(F);
261 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 504)
262 // CHECK: zext i1 [[TMP]] to i32
264 res = finite(D);
265 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 504)
266 // CHECK: zext i1 [[TMP]] to i32
268 res = __builtin_isnormal(*H);
269 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f16(half {{.*}}, i32 264)
270 // CHECK: zext i1 [[TMP]] to i32
272 res = __builtin_isnormal(F);
273 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 264)
274 // CHECK: zext i1 [[TMP]] to i32
276 res = __builtin_issubnormal(F);
277 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 144)
278 // CHECK: zext i1 [[TMP]] to i32
280 res = __builtin_iszero(F);
281 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 96)
282 // CHECK: zext i1 [[TMP]] to i32
284 res = __builtin_issignaling(F);
285 // CHECK: [[TMP:%.*]] = call i1 @llvm.is.fpclass.f32(float {{.*}}, i32 1)
286 // CHECK: zext i1 [[TMP]] to i32
288 res = __builtin_flt_rounds();
289 // CHECK: call i32 @llvm.get.rounding(
// CHECK-LABEL: define{{.*}} void @test_float_builtin_ops
// Floating-point math builtins must lower to the corresponding LLVM
// instruction (frem) or intrinsic for each of float/double/long double.
void test_float_builtin_ops(float F, double D, long double LD) {
  volatile float resf;
  volatile double resd;
  volatile long double resld;
  volatile long int resli;
  volatile long long int reslli;

  resf = __builtin_fmodf(F,F);
  // CHECK: frem float

  resd = __builtin_fmod(D,D);
  // CHECK: frem double

  resld = __builtin_fmodl(LD,LD);
  // CHECK: frem x86_fp80

  resf = __builtin_fabsf(F);
  resd = __builtin_fabs(D);
  resld = __builtin_fabsl(LD);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80

  resf = __builtin_canonicalizef(F);
  resd = __builtin_canonicalize(D);
  resld = __builtin_canonicalizel(LD);
  // CHECK: call float @llvm.canonicalize.f32(float
  // CHECK: call double @llvm.canonicalize.f64(double
  // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80

  resf = __builtin_fminf(F, F);
  // CHECK: call float @llvm.minnum.f32

  resd = __builtin_fmin(D, D);
  // CHECK: call double @llvm.minnum.f64

  resld = __builtin_fminl(LD, LD);
  // CHECK: call x86_fp80 @llvm.minnum.f80

  resf = __builtin_fmaxf(F, F);
  // CHECK: call float @llvm.maxnum.f32

  resd = __builtin_fmax(D, D);
  // CHECK: call double @llvm.maxnum.f64

  resld = __builtin_fmaxl(LD, LD);
  // CHECK: call x86_fp80 @llvm.maxnum.f80

  resf = __builtin_fabsf(F);
  // CHECK: call float @llvm.fabs.f32

  resd = __builtin_fabs(D);
  // CHECK: call double @llvm.fabs.f64

  resld = __builtin_fabsl(LD);
  // CHECK: call x86_fp80 @llvm.fabs.f80

  resf = __builtin_copysignf(F, F);
  // CHECK: call float @llvm.copysign.f32

  resd = __builtin_copysign(D, D);
  // CHECK: call double @llvm.copysign.f64

  resld = __builtin_copysignl(LD, LD);
  // CHECK: call x86_fp80 @llvm.copysign.f80

  resf = __builtin_ceilf(F);
  // CHECK: call float @llvm.ceil.f32

  resd = __builtin_ceil(D);
  // CHECK: call double @llvm.ceil.f64

  resld = __builtin_ceill(LD);
  // CHECK: call x86_fp80 @llvm.ceil.f80

  resf = __builtin_floorf(F);
  // CHECK: call float @llvm.floor.f32

  resd = __builtin_floor(D);
  // CHECK: call double @llvm.floor.f64

  resld = __builtin_floorl(LD);
  // CHECK: call x86_fp80 @llvm.floor.f80

  resf = __builtin_sqrtf(F);
  // CHECK: call float @llvm.sqrt.f32(

  resd = __builtin_sqrt(D);
  // CHECK: call double @llvm.sqrt.f64(

  resld = __builtin_sqrtl(LD);
  // CHECK: call x86_fp80 @llvm.sqrt.f80

  resf = __builtin_truncf(F);
  // CHECK: call float @llvm.trunc.f32

  resd = __builtin_trunc(D);
  // CHECK: call double @llvm.trunc.f64

  resld = __builtin_truncl(LD);
  // CHECK: call x86_fp80 @llvm.trunc.f80

  resf = __builtin_rintf(F);
  // CHECK: call float @llvm.rint.f32

  resd = __builtin_rint(D);
  // CHECK: call double @llvm.rint.f64

  resld = __builtin_rintl(LD);
  // CHECK: call x86_fp80 @llvm.rint.f80

  resf = __builtin_nearbyintf(F);
  // CHECK: call float @llvm.nearbyint.f32

  resd = __builtin_nearbyint(D);
  // CHECK: call double @llvm.nearbyint.f64

  resld = __builtin_nearbyintl(LD);
  // CHECK: call x86_fp80 @llvm.nearbyint.f80

  resf = __builtin_roundf(F);
  // CHECK: call float @llvm.round.f32

  resd = __builtin_round(D);
  // CHECK: call double @llvm.round.f64

  resld = __builtin_roundl(LD);
  // CHECK: call x86_fp80 @llvm.round.f80

  resf = __builtin_roundevenf(F);
  // CHECK: call float @llvm.roundeven.f32

  resd = __builtin_roundeven(D);
  // CHECK: call double @llvm.roundeven.f64

  resld = __builtin_roundevenl(LD);
  // CHECK: call x86_fp80 @llvm.roundeven.f80

  resli = __builtin_lroundf (F);
  // CHECK: call i64 @llvm.lround.i64.f32

  resli = __builtin_lround (D);
  // CHECK: call i64 @llvm.lround.i64.f64

  resli = __builtin_lroundl (LD);
  // CHECK: call i64 @llvm.lround.i64.f80

  resli = __builtin_lrintf (F);
  // CHECK: call i64 @llvm.lrint.i64.f32

  resli = __builtin_lrint (D);
  // CHECK: call i64 @llvm.lrint.i64.f64

  resli = __builtin_lrintl (LD);
  // CHECK: call i64 @llvm.lrint.i64.f80
}
// __builtin_longjmp isn't supported on all platforms, so only test it on X86.
#ifdef __x86_64__

// CHECK-LABEL: define{{.*}} void @test_builtin_longjmp(ptr{{.*}}
void test_builtin_longjmp(void **buffer) {
  // CHECK: [[LOAD:%[a-z0-9]+]] = load ptr, ptr
  // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(ptr [[LOAD]])
  __builtin_longjmp(buffer, 1);
  // CHECK-NEXT: unreachable
}

#endif
// CHECK-LABEL: define{{.*}} void @test_memory_builtins
// Allocation builtins must lower to plain libc calls (malloc/free/...).
void test_memory_builtins(int n) {
  // CHECK: call ptr @malloc
  void * p = __builtin_malloc(n);
  // CHECK: call void @free
  __builtin_free(p);
  // CHECK: call ptr @calloc
  p = __builtin_calloc(1, n);
  // CHECK: call ptr @realloc
  p = __builtin_realloc(p, n);
  // CHECK: call void @free
  __builtin_free(p);
}
// CHECK-LABEL: define{{.*}} i64 @test_builtin_readcyclecounter
long long test_builtin_readcyclecounter(void) {
  // CHECK: call i64 @llvm.readcyclecounter()
  return __builtin_readcyclecounter();
}
/// __builtin_launder should be a NOP in C since there are no vtables.
// CHECK-LABEL: define{{.*}} void @test_builtin_launder
void test_builtin_launder(int *p) {
  // CHECK: [[TMP:%.*]] = load ptr,
  // CHECK-NOT: @llvm.launder
  // CHECK: store ptr [[TMP]],
  int *d = __builtin_launder(p);
}
// __warn_memset_zero_len should be NOP, see https://sourceware.org/bugzilla/show_bug.cgi?id=25399
// CHECK-LABEL: define{{.*}} void @test___warn_memset_zero_len
void test___warn_memset_zero_len(void) {
  // CHECK-NOT: @__warn_memset_zero_len
  __warn_memset_zero_len();
}
500 // Behavior of __builtin_os_log differs between platforms, so only test on X86
501 #ifdef __x86_64__
503 // CHECK-LABEL: define{{.*}} void @test_builtin_os_log
504 // CHECK: (ptr noundef %[[BUF:.*]], i32 noundef %[[I:.*]], ptr noundef %[[DATA:.*]])
505 void test_builtin_os_log(void *buf, int i, const char *data) {
506 volatile int len;
507 // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
508 // CHECK: %[[I_ADDR:.*]] = alloca i32, align 4
509 // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
510 // CHECK: %[[LEN:.*]] = alloca i32, align 4
511 // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
512 // CHECK: store i32 %[[I]], ptr %[[I_ADDR]], align 4
513 // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8
515 // CHECK: store volatile i32 34, ptr %[[LEN]]
516 len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);
518 // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]]
519 // CHECK: %[[V2:.*]] = load i32, ptr %[[I_ADDR]]
520 // CHECK: %[[V3:.*]] = load ptr, ptr %[[DATA_ADDR]]
521 // CHECK: %[[V4:.*]] = ptrtoint ptr %[[V3]] to i64
522 // CHECK: %[[V5:.*]] = load ptr, ptr %[[DATA_ADDR]]
523 // CHECK: %[[V6:.*]] = ptrtoint ptr %[[V5]] to i64
524 // CHECK: call void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49(ptr noundef %[[V1]], i32 noundef %[[V2]], i64 noundef %[[V4]], i32 noundef 16, i64 noundef %[[V6]])
525 __builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);
527 // privacy annotations aren't recognized when they are preceded or followed
528 // by non-whitespace characters.
530 // CHECK: call void @__os_log_helper_1_2_1_8_32(
531 __builtin_os_log_format(buf, "%{xyz public}s", data);
533 // CHECK: call void @__os_log_helper_1_2_1_8_32(
534 __builtin_os_log_format(buf, "%{ public xyz}s", data);
536 // CHECK: call void @__os_log_helper_1_2_1_8_32(
537 __builtin_os_log_format(buf, "%{ public1}s", data);
539 // Privacy annotations do not have to be in the first comma-delimited string.
541 // CHECK: call void @__os_log_helper_1_2_1_8_34(
542 __builtin_os_log_format(buf, "%{ xyz, public }s", "abc");
544 // CHECK: call void @__os_log_helper_1_3_1_8_33(
545 __builtin_os_log_format(buf, "%{ xyz, private }s", "abc");
547 // CHECK: call void @__os_log_helper_1_3_1_8_37(
548 __builtin_os_log_format(buf, "%{ xyz, sensitive }s", "abc");
550 // The strictest privacy annotation in the string wins.
552 // CHECK: call void @__os_log_helper_1_3_1_8_33(
553 __builtin_os_log_format(buf, "%{ private, public, private, public}s", "abc");
555 // CHECK: call void @__os_log_helper_1_3_1_8_37(
556 __builtin_os_log_format(buf, "%{ private, sensitive, private, public}s",
557 "abc");
559 // CHECK: store volatile i32 22, ptr %[[LEN]], align 4
560 len = __builtin_os_log_format_buffer_size("%{mask.xyz}s", "abc");
562 // CHECK: call void @__os_log_helper_1_2_2_8_112_8_34(ptr noundef {{.*}}, i64 noundef 8026488
563 __builtin_os_log_format(buf, "%{mask.xyz, public}s", "abc");
565 // CHECK: call void @__os_log_helper_1_3_2_8_112_4_1(ptr noundef {{.*}}, i64 noundef 8026488
566 __builtin_os_log_format(buf, "%{ mask.xyz, private }d", 11);
568 // Mask type is silently ignored.
569 // CHECK: call void @__os_log_helper_1_2_1_8_32(
570 __builtin_os_log_format(buf, "%{ mask. xyz }s", "abc");
572 // CHECK: call void @__os_log_helper_1_2_1_8_32(
573 __builtin_os_log_format(buf, "%{ mask.xy z }s", "abc");
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]], i32 noundef %[[ARG2:.*]], i64 noundef %[[ARG3:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG2_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG3_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: store i64 %[[ARG1]], ptr %[[ARG1_ADDR]], align 8
// CHECK: store i32 %[[ARG2]], ptr %[[ARG2_ADDR]], align 4
// CHECK: store i64 %[[ARG3]], ptr %[[ARG3_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 3, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 4, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 8
// CHECK: store i8 34, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 9
// CHECK: store i8 8, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 10
// CHECK: %[[V1:.*]] = load i64, ptr %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], ptr %[[ARGDATA3]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, ptr %[[BUF]], i64 18
// CHECK: store i8 17, ptr %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, ptr %[[BUF]], i64 19
// CHECK: store i8 4, ptr %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, ptr %[[BUF]], i64 20
// CHECK: %[[V2:.*]] = load i32, ptr %[[ARG2_ADDR]], align 4
// CHECK: store i32 %[[V2]], ptr %[[ARGDATA7]], align 1
// CHECK: %[[ARGDESCRIPTOR9:.*]] = getelementptr i8, ptr %[[BUF]], i64 24
// CHECK: store i8 49, ptr %[[ARGDESCRIPTOR9]], align 1
// CHECK: %[[ARGSIZE10:.*]] = getelementptr i8, ptr %[[BUF]], i64 25
// CHECK: store i8 8, ptr %[[ARGSIZE10]], align 1
// CHECK: %[[ARGDATA11:.*]] = getelementptr i8, ptr %[[BUF]], i64 26
// CHECK: %[[V3:.*]] = load i64, ptr %[[ARG3_ADDR]], align 8
// CHECK: store i64 %[[V3]], ptr %[[ARGDATA11]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_wide
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA:.*]], ptr noundef %[[STR:.*]])
typedef int wchar_t;
void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[STR_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8
  // CHECK: store ptr %[[STR]], ptr %[[STR_ADDR]], align 8

  // CHECK: store volatile i32 12, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%S", str);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load ptr, ptr %[[STR_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint ptr %[[V2]] to i64
  // CHECK: call void @__os_log_helper_1_2_1_8_80(ptr noundef %[[V1]], i64 noundef %[[V3]])

  __builtin_os_log_format(buf, "%S", str);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_8_80
// CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], ptr %[[ARG0_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 80, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 8, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i64, ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_precision_width
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA:.*]], i32 noundef %[[PRECISION:.*]], i32 noundef %[[WIDTH:.*]])
void test_builtin_os_log_precision_width(void *buf, const char *data,
                                         int precision, int width) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[PRECISION_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[WIDTH_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA]], ptr %[[DATA_ADDR]], align 8
  // CHECK: store i32 %[[PRECISION]], ptr %[[PRECISION_ADDR]], align 4
  // CHECK: store i32 %[[WIDTH]], ptr %[[WIDTH_ADDR]], align 4

  // CHECK: store volatile i32 24, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, ptr %[[PRECISION_ADDR]], align 4
  // CHECK: %[[V3:.*]] = load i32, ptr %[[WIDTH_ADDR]], align 4
  // CHECK: %[[V4:.*]] = load ptr, ptr %[[DATA_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint ptr %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_3_4_0_4_16_8_32(ptr noundef %[[V1]], i32 noundef %[[V2]], i32 noundef %[[V3]], i64 noundef %[[V5]])
  __builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_3_4_0_4_16_8_32
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i32 noundef %[[ARG1:.*]], i64 noundef %[[ARG2:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG2_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[ARG1]], ptr %[[ARG1_ADDR]], align 4
// CHECK: store i64 %[[ARG2]], ptr %[[ARG2_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 3, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 8
// CHECK: store i8 16, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 9
// CHECK: store i8 4, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 10
// CHECK: %[[V1:.*]] = load i32, ptr %[[ARG1_ADDR]], align 4
// CHECK: store i32 %[[V1]], ptr %[[ARGDATA3]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, ptr %[[BUF]], i64 14
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, ptr %[[BUF]], i64 15
// CHECK: store i8 8, ptr %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, ptr %[[BUF]], i64 16
// CHECK: %[[V2:.*]] = load i64, ptr %[[ARG2_ADDR]], align 8
// CHECK: store i64 %[[V2]], ptr %[[ARGDATA7]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_invalid
// CHECK: (ptr noundef %[[BUF:.*]], i32 noundef %[[DATA:.*]])
void test_builtin_os_log_invalid(void *buf, int data) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store i32 %[[DATA]], ptr %[[DATA_ADDR]], align 4

  // CHECK: store volatile i32 8, ptr %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, ptr %[[DATA_ADDR]], align 4
  // CHECK: call void @__os_log_helper_1_0_1_4_0(ptr noundef %[[V1]], i32 noundef %[[V2]])

  __builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_4_0
// CHECK: (ptr noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], ptr %[[ARG0_ADDR]], align 4
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 0, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 1, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 4, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_percent
// CHECK: (ptr noundef %[[BUF:.*]], ptr noundef %[[DATA1:.*]], ptr noundef %[[DATA2:.*]])
// Check that the %% which does not consume any argument is correctly handled
void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA1_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[DATA2_ADDR:.*]] = alloca ptr, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
  // CHECK: store ptr %[[DATA1]], ptr %[[DATA1_ADDR]], align 8
  // CHECK: store ptr %[[DATA2]], ptr %[[DATA2_ADDR]], align 8
  // CHECK: store volatile i32 22, ptr %[[LEN]], align 4

  len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);

  // CHECK: %[[V1:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load ptr, ptr %[[DATA1_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint ptr %[[V2]] to i64
  // CHECK: %[[V4:.*]] = load ptr, ptr %[[DATA2_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint ptr %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_2_8_32_8_32(ptr noundef %[[V1]], i64 noundef %[[V3]], i64 noundef %[[V5]])

  __builtin_os_log_format(buf, "%s %% %s", data1, data2);
}
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_2_8_32_8_32
// CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[ARG1]], ptr %[[ARG1_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
// CHECK: store i8 2, ptr %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
// CHECK: store i8 2, ptr %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
// CHECK: store i8 8, ptr %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
// CHECK: %[[V0:.*]] = load i64, ptr %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], ptr %[[ARGDATA]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, ptr %[[BUF]], i64 12
// CHECK: store i8 32, ptr %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, ptr %[[BUF]], i64 13
// CHECK: store i8 8, ptr %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, ptr %[[BUF]], i64 14
// CHECK: %[[V1:.*]] = load i64, ptr %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], ptr %[[ARGDATA3]], align 1
828 // Check that the following two functions call the same helper function.
830 // CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper0
831 // CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
// "%d %f" (int, double) produces the same buffer layout as "%u %lld" in
// test_builtin_os_log_merge_helper1, so both call sites reference the same
// linkonce_odr helper @__os_log_helper_1_0_2_4_0_8_0 (verified by the
// matching CHECK line in helper1 below).
832 void test_builtin_os_log_merge_helper0(void *buf, int i, double d) {
833 __builtin_os_log_format(buf, "%d %f", i, d);
836 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_2_4_0_8_0(
838 // CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper1
839 // CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
// Same helper name as in test_builtin_os_log_merge_helper0: unsigned/long long
// arguments with this format share the 4-byte + 8-byte layout, so the emitted
// helper is merged rather than duplicated.
840 void test_builtin_os_log_merge_helper1(void *buf, unsigned u, long long ll) {
841 __builtin_os_log_format(buf, "%u %lld", u, ll);
844 // Check that this function doesn't write past the end of array 'buf'.
846 // CHECK-LABEL: define{{.*}} void @test_builtin_os_log_errno
847 void test_builtin_os_log_errno(void) {
848 // CHECK-NOT: @stacksave
849 // CHECK: %[[BUF:.*]] = alloca [4 x i8], align 1
850 // CHECK: %[[DECAY:.*]] = getelementptr inbounds [4 x i8], ptr %[[BUF]], i64 0, i64 0
851 // CHECK: call void @__os_log_helper_1_2_1_0_96(ptr noundef %[[DECAY]])
852 // CHECK-NOT: @stackrestore
// %m consumes no variadic argument, so the buffer-size builtin folds to a
// constant and buf is a plain [4 x i8] alloca -- the CHECK-NOT lines above
// verify no stacksave/stackrestore (i.e. no VLA codepath) is emitted.
// The helper (checked further below) stores only the 2-byte header plus a
// 2-byte zero-size descriptor, staying within the 4-byte array.
854 char buf[__builtin_os_log_format_buffer_size("%m")];
855 __builtin_os_log_format(buf, "%m");
858 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_0_96
859 // CHECK: (ptr noundef %[[BUFFER:.*]])
861 // CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
862 // CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
863 // CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
864 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
865 // CHECK: store i8 2, ptr %[[SUMMARY]], align 1
866 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
867 // CHECK: store i8 1, ptr %[[NUMARGS]], align 1
868 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
869 // CHECK: store i8 96, ptr %[[ARGDESCRIPTOR]], align 1
870 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
871 // CHECK: store i8 0, ptr %[[ARGSIZE]], align 1
872 // CHECK-NEXT: ret void
874 // CHECK-LABEL: define{{.*}} void @test_builtin_os_log_long_double
875 // CHECK: (ptr noundef %[[BUF:.*]], x86_fp80 noundef %[[LD:.*]])
// Checks x86-64 lowering of a long double os_log argument: the x86_fp80 value
// is bitcast to i80, zero-extended to i128, and passed to the helper split
// into two i64 halves (the { i64, i64 } coercion below).
876 void test_builtin_os_log_long_double(void *buf, long double ld) {
877 // CHECK: %[[BUF_ADDR:.*]] = alloca ptr, align 8
878 // CHECK: %[[LD_ADDR:.*]] = alloca x86_fp80, align 16
879 // CHECK: %[[COERCE:.*]] = alloca i128, align 16
880 // CHECK: store ptr %[[BUF]], ptr %[[BUF_ADDR]], align 8
881 // CHECK: store x86_fp80 %[[LD]], ptr %[[LD_ADDR]], align 16
882 // CHECK: %[[V0:.*]] = load ptr, ptr %[[BUF_ADDR]], align 8
883 // CHECK: %[[V1:.*]] = load x86_fp80, ptr %[[LD_ADDR]], align 16
884 // CHECK: %[[V2:.*]] = bitcast x86_fp80 %[[V1]] to i80
885 // CHECK: %[[V3:.*]] = zext i80 %[[V2]] to i128
886 // CHECK: store i128 %[[V3]], ptr %[[COERCE]], align 16
887 // CHECK: %[[V5:.*]] = getelementptr inbounds { i64, i64 }, ptr %[[COERCE]], i32 0, i32 0
888 // CHECK: %[[V6:.*]] = load i64, ptr %[[V5]], align 16
889 // CHECK: %[[V7:.*]] = getelementptr inbounds { i64, i64 }, ptr %[[COERCE]], i32 0, i32 1
890 // CHECK: %[[V8:.*]] = load i64, ptr %[[V7]], align 8
891 // CHECK: call void @__os_log_helper_1_0_1_16_0(ptr noundef %[[V0]], i64 noundef %[[V6]], i64 noundef %[[V8]])
893 __builtin_os_log_format(buf, "%Lf", ld);
896 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_16_0
897 // CHECK: (ptr noundef %[[BUFFER:.*]], i64 noundef %[[ARG0_COERCE0:.*]], i64 noundef %[[ARG0_COERCE1:.*]])
899 // CHECK: %[[ARG0:.*]] = alloca i128, align 16
900 // CHECK: %[[BUFFER_ADDR:.*]] = alloca ptr, align 8
901 // CHECK: %[[ARG0_ADDR:.*]] = alloca i128, align 16
902 // CHECK: %[[V1:.*]] = getelementptr inbounds { i64, i64 }, ptr %[[ARG0]], i32 0, i32 0
903 // CHECK: store i64 %[[ARG0_COERCE0]], ptr %[[V1]], align 16
904 // CHECK: %[[V2:.*]] = getelementptr inbounds { i64, i64 }, ptr %[[ARG0]], i32 0, i32 1
905 // CHECK: store i64 %[[ARG0_COERCE1]], ptr %[[V2]], align 8
906 // CHECK: %[[ARG01:.*]] = load i128, ptr %[[ARG0]], align 16
907 // CHECK: store ptr %[[BUFFER]], ptr %[[BUFFER_ADDR]], align 8
908 // CHECK: store i128 %[[ARG01]], ptr %[[ARG0_ADDR]], align 16
909 // CHECK: %[[BUF:.*]] = load ptr, ptr %[[BUFFER_ADDR]], align 8
910 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, ptr %[[BUF]], i64 0
911 // CHECK: store i8 0, ptr %[[SUMMARY]], align 1
912 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, ptr %[[BUF]], i64 1
913 // CHECK: store i8 1, ptr %[[NUMARGS]], align 1
914 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, ptr %[[BUF]], i64 2
915 // CHECK: store i8 0, ptr %[[ARGDESCRIPTOR]], align 1
916 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, ptr %[[BUF]], i64 3
917 // CHECK: store i8 16, ptr %[[ARGSIZE]], align 1
918 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, ptr %[[BUF]], i64 4
919 // CHECK: %[[V3:.*]] = load i128, ptr %[[ARG0_ADDR]], align 16
920 // CHECK: store i128 %[[V3]], ptr %[[ARGDATA]], align 1
922 #endif