; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b)
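
; The *.with.overflow intrinsics return a {result, overflow-bit} aggregate.
; The tests below check that instsimplify constant-folds the extracted result
; and overflow bit, and that calls with undef/poison or otherwise trivial
; operands fold to a constant aggregate.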
define i1 @test_uadd1() {
; CHECK-LABEL: @test_uadd1(
; CHECK-NEXT: ret i1 true
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_uadd2() {
; CHECK-LABEL: @test_uadd2(
; CHECK-NEXT: ret i8 42
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_uadd3(i8 %v) {
; CHECK-LABEL: @test_uadd3(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd3_poison(i8 %v) {
; CHECK-LABEL: @test_uadd3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4(i8 %v) {
; CHECK-LABEL: @test_uadd4(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4_poison(i8 %v) {
; CHECK-LABEL: @test_uadd4_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define i1 @test_sadd1() {
; CHECK-LABEL: @test_sadd1(
; CHECK-NEXT: ret i1 true
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_sadd2() {
; CHECK-LABEL: @test_sadd2(
; CHECK-NEXT: ret i8 -86
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_sadd3(i8 %v) {
; CHECK-LABEL: @test_sadd3(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd3_poison(i8 %v) {
; CHECK-LABEL: @test_sadd3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4(i8 %v) {
; CHECK-LABEL: @test_sadd4(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4_poison(i8 %v) {
; CHECK-LABEL: @test_sadd4_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_usub1(i8 %V) {
; CHECK-LABEL: @test_usub1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2(i8 %V) {
; CHECK-LABEL: @test_usub2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2_poison(i8 %V) {
; CHECK-LABEL: @test_usub2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3(i8 %V) {
; CHECK-LABEL: @test_usub3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3_poison(i8 %V) {
; CHECK-LABEL: @test_usub3_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub1(i8 %V) {
; CHECK-LABEL: @test_ssub1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2(i8 %V) {
; CHECK-LABEL: @test_ssub2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2_poison(i8 %V) {
; CHECK-LABEL: @test_ssub2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3(i8 %V) {
; CHECK-LABEL: @test_ssub3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3_poison(i8 %V) {
; CHECK-LABEL: @test_ssub3_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul1(i8 %V) {
; CHECK-LABEL: @test_umul1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2(i8 %V) {
; CHECK-LABEL: @test_umul2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2_poison(i8 %V) {
; CHECK-LABEL: @test_umul2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul3(i8 %V) {
; CHECK-LABEL: @test_umul3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4(i8 %V) {
; CHECK-LABEL: @test_umul4(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4_poison(i8 %V) {
; CHECK-LABEL: @test_umul4_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul1(i8 %V) {
; CHECK-LABEL: @test_smul1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2(i8 %V) {
; CHECK-LABEL: @test_smul2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2_poison(i8 %V) {
; CHECK-LABEL: @test_smul2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul3(i8 %V) {
; CHECK-LABEL: @test_smul3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4(i8 %V) {
; CHECK-LABEL: @test_smul4(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4_poison(i8 %V) {
; CHECK-LABEL: @test_smul4_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

; Test a non-intrinsic that we know about as a library call.
declare float @fabsf(float %x)

define float @test_fabs_libcall() {
; CHECK-LABEL: @test_fabs_libcall(
; CHECK-NEXT: [[X:%.*]] = call float @fabsf(float -4.200000e+01)
; CHECK-NEXT: ret float 4.200000e+01
;
  %x = call float @fabsf(float -42.0)
; This is still a real function call, so instsimplify won't nuke it -- other
; passes have to do that.
  ret float %x
}

declare float @llvm.fabs.f32(float) nounwind readnone
declare float @llvm.floor.f32(float) nounwind readnone
declare float @llvm.ceil.f32(float) nounwind readnone
declare float @llvm.trunc.f32(float) nounwind readnone
declare float @llvm.rint.f32(float) nounwind readnone
declare float @llvm.nearbyint.f32(float) nounwind readnone
declare float @llvm.canonicalize.f32(float) nounwind readnone
declare float @llvm.arithmetic.fence.f32(float) nounwind readnone

; Test idempotent intrinsics
define float @test_idempotence(float %a) {
; CHECK-LABEL: @test_idempotence(
; CHECK-NEXT: [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]])
; CHECK-NEXT: [[B0:%.*]] = call float @llvm.floor.f32(float [[A]])
; CHECK-NEXT: [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]])
; CHECK-NEXT: [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]])
; CHECK-NEXT: [[E0:%.*]] = call float @llvm.rint.f32(float [[A]])
; CHECK-NEXT: [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]])
; CHECK-NEXT: [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]])
; CHECK-NEXT: [[H0:%.*]] = call float @llvm.arithmetic.fence.f32(float [[A]])
; CHECK-NEXT: [[R0:%.*]] = fadd float [[A0]], [[B0]]
; CHECK-NEXT: [[R1:%.*]] = fadd float [[R0]], [[C0]]
; CHECK-NEXT: [[R2:%.*]] = fadd float [[R1]], [[D0]]
; CHECK-NEXT: [[R3:%.*]] = fadd float [[R2]], [[E0]]
; CHECK-NEXT: [[R4:%.*]] = fadd float [[R3]], [[F0]]
; CHECK-NEXT: [[R5:%.*]] = fadd float [[R4]], [[G0]]
; CHECK-NEXT: [[R6:%.*]] = fadd float [[R5]], [[H0]]
; CHECK-NEXT: ret float [[R6]]
;
  %a0 = call float @llvm.fabs.f32(float %a)
  %a1 = call float @llvm.fabs.f32(float %a0)

  %b0 = call float @llvm.floor.f32(float %a)
  %b1 = call float @llvm.floor.f32(float %b0)

  %c0 = call float @llvm.ceil.f32(float %a)
  %c1 = call float @llvm.ceil.f32(float %c0)

  %d0 = call float @llvm.trunc.f32(float %a)
  %d1 = call float @llvm.trunc.f32(float %d0)

  %e0 = call float @llvm.rint.f32(float %a)
  %e1 = call float @llvm.rint.f32(float %e0)

  %f0 = call float @llvm.nearbyint.f32(float %a)
  %f1 = call float @llvm.nearbyint.f32(float %f0)

  %g0 = call float @llvm.canonicalize.f32(float %a)
  %g1 = call float @llvm.canonicalize.f32(float %g0)

  %h0 = call float @llvm.arithmetic.fence.f32(float %a)
  %h1 = call float @llvm.arithmetic.fence.f32(float %h0)

  %r0 = fadd float %a1, %b1
  %r1 = fadd float %r0, %c1
  %r2 = fadd float %r1, %d1
  %r3 = fadd float %r2, %e1
  %r4 = fadd float %r3, %f1
  %r5 = fadd float %r4, %g1
  %r6 = fadd float %r5, %h1

  ret float %r6
}

define ptr @operator_new() {
; CHECK-LABEL: @operator_new(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @_Znwm(i64 8)
; CHECK-NEXT: br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_Znwm(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result
}

declare nonnull noalias ptr @_Znwm(i64)
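
; Because @_Znwm is declared nonnull, the "icmp eq %call, null" above folds to
; false and the branch condition becomes a constant. The nothrow operator new
; below carries no nonnull attribute, so its null check has to stay.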
%"struct.std::nothrow_t" = type { i8 }
@_ZSt7nothrow = external global %"struct.std::nothrow_t"

define ptr @operator_new_nothrow_t() {
; CHECK-LABEL: @operator_new_nothrow_t(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result
}

declare ptr @_ZnamRKSt9nothrow_t(i64, ptr) nounwind

define ptr @malloc_can_return_null() {
; CHECK-LABEL: @malloc_can_return_null(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @malloc(i64 8)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @malloc(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result
}

define i32 @call_null() {
; CHECK-LABEL: @call_null(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32 null()
; CHECK-NEXT: ret i32 poison
;
entry:
  %call = call i32 null()
  ret i32 %call
}

define i32 @call_undef() {
; CHECK-LABEL: @call_undef(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32 undef()
; CHECK-NEXT: ret i32 poison
;
entry:
  %call = call i32 undef()
  ret i32 %call
}

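
; Masked loads: lanes whose mask bit is known false are never accessed, so the
; constant load from @GV below can fold even though part of the addressed range
; is out of bounds; with an undef mask the passthru operand is returned.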
@GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49]

define <8 x i32> @partial_masked_load() {
; CHECK-LABEL: @partial_masked_load(
; CHECK-NEXT: ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -2), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
  ret <8 x i32> %masked.load
}

define <8 x i32> @masked_load_undef_mask(ptr %V) {
; CHECK-LABEL: @masked_load_undef_mask(
; CHECK-NEXT: ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
  ret <8 x i32> %masked.load
}

declare noalias ptr @malloc(i64)

declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>)

declare double @llvm.powi.f64.i16(double, i16)
declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16)
declare double @llvm.powi.f64.i32(double, i32)
declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32)

define double @constant_fold_powi() {
; CHECK-LABEL: @constant_fold_powi(
; CHECK-NEXT: ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i32(double 3.00000e+00, i32 2)
  ret double %t0
}

define double @constant_fold_powi_i16() {
; CHECK-LABEL: @constant_fold_powi_i16(
; CHECK-NEXT: ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i16(double 3.00000e+00, i16 2)
  ret double %t0
}

define <2 x double> @constant_fold_powi_vec() {
; CHECK-LABEL: @constant_fold_powi_vec(
; CHECK-NEXT: ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2)
  ret <2 x double> %t0
}

define <2 x double> @constant_fold_powi_vec_i16() {
; CHECK-LABEL: @constant_fold_powi_vec_i16(
; CHECK-NEXT: ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i16(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i16 2)
  ret <2 x double> %t0
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i9 @llvm.fshr.i9(i9, i9, i9)
declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>)
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>)
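
; Funnel-shift semantics: the concatenation of the first two operands is
; shifted, and the shift amount is interpreted modulo the bit width. A shift
; amount of 0 (or any multiple of the bit width) therefore returns the first
; operand for fshl and the second operand for fshr.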
define i8 @fshl_no_shift(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0)
  ret i8 %z
}

define i9 @fshr_no_shift(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0)
  ret i9 %z
}

define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40)
  ret i8 %z
}

define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189)
  ret i9 %z
}

define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT: ret <2 x i7> [[X:%.*]]
;
  %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>)
  ret <2 x i7> %z
}

define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT: ret <2 x i8> [[Y:%.*]]
;
  %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>)
  ret <2 x i8> %z
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[F]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_swapped(
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[F]], i8 [[X]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard(
; CHECK-NEXT: [[C:%.*]] = icmp eq i9 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[Y]], i9 [[F]]
; CHECK-NEXT: ret i9 [[S]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_swapped(
; CHECK-NEXT: [[C:%.*]] = icmp ne i9 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[F]], i9 [[Y]]
; CHECK-NEXT: ret i9 [[S]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard(
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT: ret i8 [[F]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_swapped(
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT: ret i8 [[F]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard(
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT: ret i9 [[F]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_swapped(
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT: ret i9 [[F]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted(
; CHECK-NEXT: ret i9 [[X:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i9 [[X:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; Negative test - make sure we're matching the correct parameter of fshl.

define i8 @fshl_zero_shift_guard_wrong_select_op(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_wrong_select_op(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[Y]], i8 [[F]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %y, i8 %f
  ret i8 %s
}

; Vector types work too.

define <2 x i8> @rotr_zero_shift_guard_splat(<2 x i8> %x, <2 x i8> %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_splat(
; CHECK-NEXT: [[F:%.*]] = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[X]], <2 x i8> [[SH:%.*]])
; CHECK-NEXT: ret <2 x i8> [[F]]
;
  %c = icmp eq <2 x i8> %sh, zeroinitializer
  %f = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %x, <2 x i8> %sh)
  %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %f
  ret <2 x i8> %s
}

; If the first two operands of a funnel shift are undef, the result is undef.

define i8 @fshl_ops_undef(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_undef(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 %shamt)
  ret i8 %r
}

define i9 @fshr_ops_undef(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_undef(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 %shamt)
  ret i9 %r
}

; If the shift amount is undef, treat it as zero: fshl returns operand 0 and fshr returns operand 1.

define i8 @fshl_shift_undef(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_shift_undef(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 undef)
  ret i8 %r
}

define i9 @fshr_shift_undef(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_shift_undef(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 undef)
  ret i9 %r
}

; If one of the operands is poison, the result is poison.
; TODO: these should be poison
define i8 @fshl_ops_poison(i8 %b, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison(
; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 poison, i8 [[B:%.*]], i8 [[SHAMT:%.*]])
; CHECK-NEXT: ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 %b, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison2(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison2(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 undef, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison3(i8 %a, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison3(
; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[A:%.*]], i8 poison, i8 [[SHAMT:%.*]])
; CHECK-NEXT: ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison4(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison4(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison5(i8 %a, i8 %b) {
; CHECK-LABEL: @fshl_ops_poison5(
; CHECK-NEXT: ret i8 [[A:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 %b, i8 poison)
  ret i8 %r
}

define i8 @fshl_ops_poison6() {
; CHECK-LABEL: @fshl_ops_poison6(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 poison)
  ret i8 %r
}

define i9 @fshr_ops_poison(i9 %b, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison(
; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 poison, i9 [[B:%.*]], i9 [[SHAMT:%.*]])
; CHECK-NEXT: ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 %b, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison2(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison2(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 undef, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison3(i9 %a, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison3(
; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 [[A:%.*]], i9 poison, i9 [[SHAMT:%.*]])
; CHECK-NEXT: ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison4(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison4(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison5(i9 %a, i9 %b) {
; CHECK-LABEL: @fshr_ops_poison5(
; CHECK-NEXT: ret i9 [[B:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 %b, i9 poison)
  ret i9 %r
}

define i9 @fshr_ops_poison6() {
; CHECK-LABEL: @fshr_ops_poison6(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 poison)
  ret i9 %r
}

define i8 @fshl_zero(i8 %shamt) {
; CHECK-LABEL: @fshl_zero(
; CHECK-NEXT: ret i8 0
;
  %r = call i8 @llvm.fshl.i8(i8 0, i8 0, i8 %shamt)
  ret i8 %r
}

define <2 x i8> @fshr_zero_vec(<2 x i8> %shamt) {
; CHECK-LABEL: @fshr_zero_vec(
; CHECK-NEXT: ret <2 x i8> zeroinitializer
;
  %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 poison>, <2 x i8> %shamt)
  ret <2 x i8> %r
}

define <2 x i7> @fshl_ones_vec(<2 x i7> %shamt) {
; CHECK-LABEL: @fshl_ones_vec(
; CHECK-NEXT: ret <2 x i7> splat (i7 -1)
;
  %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> <i7 poison, i7 -1>, <2 x i7> <i7 -1, i7 poison>, <2 x i7> %shamt)
  ret <2 x i7> %r
}

define i9 @fshr_ones(i9 %shamt) {
; CHECK-LABEL: @fshr_ones(
; CHECK-NEXT: ret i9 -1
;
  %r = call i9 @llvm.fshr.i9(i9 -1, i9 -1, i9 %shamt)
  ret i9 %r
}

declare double @llvm.fma.f64(double, double, double)
declare double @llvm.fmuladd.f64(double, double, double)
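
; For fma/fmuladd, an undef operand folds to NaN and a poison operand folds to
; poison; a NaN constant operand folds to that same NaN (the payload is kept).
; The inf * 0.0 and inf + (-inf) cases further below are not simplified here.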
define double @fma_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fma_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op0(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fma.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fma_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fma_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op1(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fma_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fma_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op2(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fma_undef_op0_poison_op1(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op1(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double poison, double %x)
  ret double %r
}

define double @fma_undef_op0_poison_op2(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op2(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double %x, double poison)
  ret double %r
}

define double @fmuladd_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fmuladd_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op0(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fmuladd.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fmuladd_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fmuladd_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op1(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fmuladd_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fmuladd_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op2(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fmuladd_nan_op0_poison_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_poison_op1(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000000000, double poison, double %x)
  ret double %r
}

define double @fmuladd_nan_op1_poison_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_poison_op2(
; CHECK-NEXT: ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff8000000000000, double poison)
  ret double %r
}

define double @fma_nan_op0(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double 0x7ff8000000000000, double %x, double %y)
  ret double %r
}

define double @fma_nan_op1(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op1(
; CHECK-NEXT: ret double 0x7FF8000000000001
;
  %r = call double @llvm.fma.f64(double %x, double 0x7ff8000000000001, double %y)
  ret double %r
}

define double @fma_nan_op2(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op2(
; CHECK-NEXT: ret double 0x7FF8000000000002
;
  %r = call double @llvm.fma.f64(double %x, double %y, double 0x7ff8000000000002)
  ret double %r
}

define double @fmuladd_nan_op0_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op1(
; CHECK-NEXT: ret double 0x7FF8000000001234
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000001234, double 0x7ff800000000dead, double %x)
  ret double %r
}

define double @fmuladd_nan_op0_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op2(
; CHECK-NEXT: ret double 0x7FF8000000005678
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000005678, double %x, double 0x7ff800000000dead)
  ret double %r
}

define double @fmuladd_nan_op1_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_op2(
; CHECK-NEXT: ret double 0x7FF80000AAAAAAAA
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff80000aaaaaaaa, double 0x7ff800000000dead)
  ret double %r
}

define double @fma_nan_multiplicand_inf_zero(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_inf_zero(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fma_nan_multiplicand_zero_inf(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_zero_inf(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fma_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fma_nan_addend_inf_neginf(
; CHECK-NEXT: [[NOTNAN:%.*]] = uitofp i32 [[Y:%.*]] to double
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = uitofp i32 %y to double
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fma_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fma_nan_addend_neginf_inf(
; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fma.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

define double @fmuladd_nan_multiplicand_neginf_zero(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_neginf_zero(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0xFFF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double 0xfff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fmuladd_nan_multiplicand_negzero_inf(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_negzero_inf(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double -0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double -0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fmuladd_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_inf_neginf(
; CHECK-NEXT: [[NOTNAN:%.*]] = sitofp i32 [[Y:%.*]] to double
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = sitofp i32 %y to double
  %r = call double @llvm.fmuladd.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fmuladd_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_neginf_inf(
; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fmuladd.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

declare float @llvm.copysign.f32(float, float)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)

define float @copysign_same_operand(float %x) {
; CHECK-LABEL: @copysign_same_operand(
; CHECK-NEXT: ret float [[X:%.*]]
;
  %r = call float @llvm.copysign.f32(float %x, float %x)
  ret float %r
}

define <2 x double> @copysign_same_operand_vec(<2 x double> %x) {
; CHECK-LABEL: @copysign_same_operand_vec(
; CHECK-NEXT: ret <2 x double> [[X:%.*]]
;
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %x)
  ret <2 x double> %r
}

define float @negated_sign_arg(float %x) {
; CHECK-LABEL: @negated_sign_arg(
; CHECK-NEXT: [[NEGX:%.*]] = fsub ninf float -0.000000e+00, [[X:%.*]]
; CHECK-NEXT: ret float [[NEGX]]
;
  %negx = fsub ninf float -0.0, %x
  %r = call arcp float @llvm.copysign.f32(float %x, float %negx)
  ret float %r
}

define <2 x double> @negated_sign_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_sign_arg_vec(
; CHECK-NEXT: [[NEGX:%.*]] = fneg afn <2 x double> [[X:%.*]]
; CHECK-NEXT: ret <2 x double> [[NEGX]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %negx)
  ret <2 x double> %r
}

define float @negated_mag_arg(float %x) {
; CHECK-LABEL: @negated_mag_arg(
; CHECK-NEXT: ret float [[X:%.*]]
;
  %negx = fneg nnan float %x
  %r = call ninf float @llvm.copysign.f32(float %negx, float %x)
  ret float %r
}

define <2 x double> @negated_mag_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_mag_arg_vec(
; CHECK-NEXT: ret <2 x double> [[X:%.*]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %negx, <2 x double> %x)
  ret <2 x double> %r
}

; We handle the "returned" attribute only in InstCombine, because this
; simplification may replace one call with another, which can cause issues
; for call graph passes.

declare i32 @passthru_i32(i32 returned)
declare ptr @passthru_p8(ptr returned)

define i32 @returned_const_int_arg() {
; CHECK-LABEL: @returned_const_int_arg(
; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 42)
; CHECK-NEXT: ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define ptr @returned_const_ptr_arg() {
; CHECK-LABEL: @returned_const_ptr_arg(
; CHECK-NEXT: [[X:%.*]] = call ptr @passthru_p8(ptr null)
; CHECK-NEXT: ret ptr [[X]]
;
  %x = call ptr @passthru_p8(ptr null)
  ret ptr %x
}

define i32 @returned_var_arg(i32 %arg) {
; CHECK-LABEL: @returned_var_arg(
; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT: ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @returned_const_int_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_const_int_arg_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 42)
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define i32 @returned_var_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_var_arg_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @call_undef_musttail() {
; CHECK-LABEL: @call_undef_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 undef()
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 undef()
  ret i32 %x
}

; This is not the builtin fmax, so we don't know anything about its behavior.

declare float @fmaxf(float, float)

define float @nobuiltin_fmax() {
; CHECK-LABEL: @nobuiltin_fmax(
; CHECK-NEXT: [[M:%.*]] = call float @fmaxf(float 0.000000e+00, float 1.000000e+00) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: [[R:%.*]] = call float @llvm.fabs.f32(float [[M]])
; CHECK-NEXT: ret float [[R]]
;
  %m = call float @fmaxf(float 0.0, float 1.0) #0
  %r = call float @llvm.fabs.f32(float %m)
  ret float %r
}

declare i32 @llvm.ctpop.i32(i32)
declare <3 x i33> @llvm.ctpop.v3i33(<3 x i33>)
declare i1 @llvm.ctpop.i1(i1)
declare i1 @llvm.ctlz.i1(i1, i1)
declare i1 @llvm.cttz.i1(i1, i1)
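
; ctpop of a value that is known to be 0 or 1 simplifies to the value itself
; (ctpop_lowbit, ctpop_signbit, ctpop_bool below); wider single-bit patterns
; are left for instcombine.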
define i32 @ctpop_lowbit(i32 %x) {
; CHECK-LABEL: @ctpop_lowbit(
; CHECK-NEXT: [[B:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: ret i32 [[B]]
;
  %b = and i32 %x, 1
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

; Negative test - only low bit allowed
; This could be reduced by instcombine to and+shift.

define i32 @ctpop_pow2(i32 %x) {
; CHECK-LABEL: @ctpop_pow2(
; CHECK-NEXT: [[B:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.ctpop.i32(i32 [[B]])
; CHECK-NEXT: ret i32 [[R]]
;
  %b = and i32 %x, 4
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

define <3 x i33> @ctpop_signbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_signbit(
; CHECK-NEXT: [[B:%.*]] = lshr <3 x i33> [[X:%.*]], splat (i33 32)
; CHECK-NEXT: ret <3 x i33> [[B]]
;
  %b = lshr <3 x i33> %x, <i33 32, i33 32, i33 32>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

; Negative test - only 1 bit allowed

define <3 x i33> @ctpop_notsignbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_notsignbit(
; CHECK-NEXT: [[B:%.*]] = lshr <3 x i33> [[X:%.*]], splat (i33 31)
; CHECK-NEXT: [[R:%.*]] = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> [[B]])
; CHECK-NEXT: ret <3 x i33> [[R]]
;
  %b = lshr <3 x i33> %x, <i33 31, i33 31, i33 31>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

define i1 @ctpop_bool(i1 %x) {
; CHECK-LABEL: @ctpop_bool(
; CHECK-NEXT: ret i1 [[X:%.*]]
;
  %r = tail call i1 @llvm.ctpop.i1(i1 %x)
  ret i1 %r
}

declare i32 @llvm.cttz.i32(i32, i1)
declare <3 x i33> @llvm.cttz.v3i33(<3 x i33>, i1)

define i32 @cttz_shl1(i32 %x) {
; CHECK-LABEL: @cttz_shl1(
; CHECK-NEXT: ret i32 [[X:%.*]]
;
  %s = shl i32 1, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @cttz_shl1_vec(<3 x i33> %x) {
; CHECK-LABEL: @cttz_shl1_vec(
; CHECK-NEXT: ret <3 x i33> [[X:%.*]]
;
  %s = shl <3 x i33> <i33 1, i33 1, i33 poison>, %x
  %r = call <3 x i33> @llvm.cttz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @cttz_shl_not_low_bit(i32 %x) {
; CHECK-LABEL: @cttz_shl_not_low_bit(
; CHECK-NEXT: [[S:%.*]] = shl i32 2, [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.cttz.i32(i32 [[S]], i1 true)
; CHECK-NEXT: ret i32 [[R]]
;
  %s = shl i32 2, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}

declare i32 @llvm.ctlz.i32(i32, i1)
declare <3 x i33> @llvm.ctlz.v3i33(<3 x i33>, i1)

define i32 @ctlz_lshr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit(
; CHECK-NEXT: ret i32 [[X:%.*]]
;
  %s = lshr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_lshr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_negative(
; CHECK-NEXT: ret i32 [[X:%.*]]
;
  %s = lshr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @ctlz_lshr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit_vec(
; CHECK-NEXT: ret <3 x i33> [[X:%.*]]
;
  %s = lshr <3 x i33> <i33 poison, i33 4294967296, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @ctlz_lshr_not_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_not_negative(
; CHECK-NEXT: [[S:%.*]] = lshr i32 42, [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.ctlz.i32(i32 [[S]], i1 true)
; CHECK-NEXT: ret i32 [[R]]
;
  %s = lshr i32 42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_ashr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit(
; CHECK-NEXT: ret i32 0
;
  %s = ashr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define i32 @ctlz_ashr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_negative(
; CHECK-NEXT: ret i32 0
;
  %s = ashr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit_vec(
; CHECK-NEXT: ret <3 x i33> zeroinitializer
;
  %s = ashr <3 x i33> <i33 4294967296, i33 poison, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 true)
  ret <3 x i33> %r
}

declare ptr @llvm.ptrmask.p0.i64(ptr, i64)

define i1 @capture_vs_recurse(i64 %mask) {
; CHECK-LABEL: @capture_vs_recurse(
; CHECK-NEXT: [[A:%.*]] = call noalias ptr @malloc(i64 8)
; CHECK-NEXT: [[B:%.*]] = call nonnull ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 [[MASK:%.*]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[A]], [[B]]
; CHECK-NEXT: ret i1 [[CMP]]
;
  %a = call noalias ptr @malloc(i64 8)
  %b = call nonnull ptr @llvm.ptrmask.p0.i64(ptr %a, i64 %mask)
  %cmp = icmp eq ptr %a, %b
  ret i1 %cmp
}

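
; i1 bit-counting: ctpop of an i1 is the value itself, so only the ctpop case
; below simplifies; ctlz/cttz of an i1 compute the inverted value (when a zero
; input is not poison), which is not an existing value, so instsimplify leaves
; those calls alone regardless of the is_zero_poison flag.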
define i1 @ctlz_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctlz_i1_non_poison_eq_false(
; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.ctlz.i1(i1 [[X:%.*]], i1 false)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT: ret i1 [[CMP]]
;
  %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 false)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @ctlz_i1_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctlz_i1_poison_eq_false(
; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.ctlz.i1(i1 [[X:%.*]], i1 true)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT: ret i1 [[CMP]]
;
  %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 true)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @cttz_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @cttz_i1_non_poison_eq_false(
; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.cttz.i1(i1 [[X:%.*]], i1 false)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT: ret i1 [[CMP]]
;
  %ct = call i1 @llvm.cttz.i1(i1 %x, i1 false)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @cttz_i1_poison_eq_false(i1 %x) {
; CHECK-LABEL: @cttz_i1_poison_eq_false(
; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.cttz.i1(i1 [[X:%.*]], i1 true)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT: ret i1 [[CMP]]
;
  %ct = call i1 @llvm.cttz.i1(i1 %x, i1 true)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @ctpop_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctpop_i1_non_poison_eq_false(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[X:%.*]], false
; CHECK-NEXT: ret i1 [[CMP]]
;
  %ct = call i1 @llvm.ctpop.i1(i1 %x)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

attributes #0 = { nobuiltin readnone }