1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='dse,verify<memoryssa>' -S | FileCheck %s
3 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
5 declare void @memset_pattern16(ptr, ptr, i64)
7 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
8 declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
9 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
10 declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
11 declare void @llvm.init.trampoline(ptr, ptr, ptr)
13 ; **** Noop load->store tests **************************************************
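; A "noop" store writes back the value that was just loaded from the same
; address, leaving memory unchanged. DSE should remove such stores unless
; something in between may modify the location, or the store itself must be
; preserved (e.g. it is volatile or an ordered atomic).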
15 ; We CAN still remove the noop store when its value comes from a volatile load.
16 define void @test_load_volatile(ptr %Q) {
17 ; CHECK-LABEL: @test_load_volatile(
18 ; CHECK-NEXT: [[A:%.*]] = load volatile i32, ptr [[Q:%.*]], align 4
19 ; CHECK-NEXT: ret void
21 %a = load volatile i32, ptr %Q
26 ; We can NOT remove the store when the store itself is volatile.
27 define void @test_store_volatile(ptr %Q) {
28 ; CHECK-LABEL: @test_store_volatile(
29 ; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
30 ; CHECK-NEXT: store volatile i32 [[A]], ptr [[Q]], align 4
31 ; CHECK-NEXT: ret void
34 store volatile i32 %a, ptr %Q
38 ; PR2599 - load -> store to same address.
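; Storing %temp5 back to the address it was loaded from (%temp4) changes
; nothing, so only the store of the negated value to %temp7 remains.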
39 define void @test12(ptr %x) nounwind {
40 ; CHECK-LABEL: @test12(
41 ; CHECK-NEXT: [[TEMP7:%.*]] = getelementptr { i32, i32 }, ptr [[X:%.*]], i32 0, i32 1
42 ; CHECK-NEXT: [[TEMP8:%.*]] = load i32, ptr [[TEMP7]], align 4
43 ; CHECK-NEXT: [[TEMP17:%.*]] = sub i32 0, [[TEMP8]]
44 ; CHECK-NEXT: store i32 [[TEMP17]], ptr [[TEMP7]], align 4
45 ; CHECK-NEXT: ret void
47 %temp4 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 0
48 %temp5 = load i32, ptr %temp4, align 4
49 %temp7 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 1
50 %temp8 = load i32, ptr %temp7, align 4
51 %temp17 = sub i32 0, %temp8
52 store i32 %temp5, ptr %temp4, align 4
53 store i32 %temp17, ptr %temp7, align 4
57 ; Remove redundant store if loaded value is in another block.
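; The store of %v back to %p is a noop: nothing on any path between the load
; and the store writes to %p, so *%p still holds %v.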
58 define i32 @test26(i1 %c, ptr %p) {
59 ; CHECK-LABEL: @test26(
61 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
63 ; CHECK-NEXT: br label [[BB3:%.*]]
65 ; CHECK-NEXT: br label [[BB3]]
67 ; CHECK-NEXT: ret i32 0
70 %v = load i32, ptr %p, align 4
71 br i1 %c, label %bb1, label %bb2
75 store i32 %v, ptr %p, align 4
81 ; Remove redundant store if loaded value is in another block.
82 define i32 @test27(i1 %c, ptr %p) {
83 ; CHECK-LABEL: @test27(
85 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
87 ; CHECK-NEXT: br label [[BB3:%.*]]
89 ; CHECK-NEXT: br label [[BB3]]
91 ; CHECK-NEXT: ret i32 0
94 %v = load i32, ptr %p, align 4
95 br i1 %c, label %bb1, label %bb2
101 store i32 %v, ptr %p, align 4
105 ; Remove redundant store if loaded value is in another block inside a loop.
106 define i32 @test31(i1 %c, ptr %p, i32 %i) {
107 ; CHECK-LABEL: @test31(
109 ; CHECK-NEXT: br label [[BB1:%.*]]
111 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]]
113 ; CHECK-NEXT: ret i32 0
116 %v = load i32, ptr %p, align 4
119 store i32 %v, ptr %p, align 4
120 br i1 %c, label %bb1, label %bb2
125 ; Don't remove "redundant" store if %p is possibly stored to.
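; %p and %p2 are plain pointer arguments and may alias, so the store of 0 to
; %p2 may change *%p; the store of %v is therefore not guaranteed to be a noop
; on the path through bb2.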
126 define i32 @test46(i1 %c, ptr %p, ptr %p2, i32 %i) {
127 ; CHECK-LABEL: @test46(
129 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
130 ; CHECK-NEXT: br label [[BB1:%.*]]
132 ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4
133 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]]
135 ; CHECK-NEXT: store i32 0, ptr [[P2:%.*]], align 4
136 ; CHECK-NEXT: br i1 [[C]], label [[BB3:%.*]], label [[BB1]]
138 ; CHECK-NEXT: ret i32 0
141 %v = load i32, ptr %p, align 4
144 store i32 %v, ptr %p, align 4
145 br i1 %c, label %bb1, label %bb2
147 store i32 0, ptr %p2, align 4
148 br i1 %c, label %bb3, label %bb1
153 declare void @unknown_func()
155 ; Remove redundant store, which is in the same loop as the load.
156 define i32 @test33(i1 %c, ptr %p, i32 %i) {
157 ; CHECK-LABEL: @test33(
159 ; CHECK-NEXT: br label [[BB1:%.*]]
161 ; CHECK-NEXT: br label [[BB2:%.*]]
163 ; CHECK-NEXT: call void @unknown_func()
164 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB3:%.*]]
166 ; CHECK-NEXT: ret i32 0
171 %v = load i32, ptr %p, align 4
174 store i32 %v, ptr %p, align 4
175 ; Might read and overwrite value at %p, but doesn't matter.
176 call void @unknown_func()
177 br i1 %c, label %bb1, label %bb3
182 declare void @unkown_write(ptr)
184 ; We can't remove the "noop" store around an unknown write.
185 define void @test43(ptr %Q) {
186 ; CHECK-LABEL: @test43(
187 ; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
188 ; CHECK-NEXT: call void @unkown_write(ptr [[Q]])
189 ; CHECK-NEXT: store i32 [[A]], ptr [[Q]], align 4
190 ; CHECK-NEXT: ret void
192 %a = load i32, ptr %Q
193 call void @unkown_write(ptr %Q)
198 ; We CAN remove it when the unknown write comes AFTER.
199 define void @test44(ptr %Q) {
200 ; CHECK-LABEL: @test44(
201 ; CHECK-NEXT: call void @unkown_write(ptr [[Q:%.*]])
202 ; CHECK-NEXT: ret void
204 %a = load i32, ptr %Q
206 call void @unkown_write(ptr %Q)
210 define void @test45(ptr %Q) {
211 ; CHECK-LABEL: @test45(
212 ; CHECK-NEXT: ret void
214 %a = load i32, ptr %Q
220 define i32 @test48(i1 %c, ptr %p) {
221 ; CHECK-LABEL: @test48(
223 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
224 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB0:%.*]], label [[BB0_0:%.*]]
226 ; CHECK-NEXT: store i32 0, ptr [[P]], align 4
227 ; CHECK-NEXT: br i1 [[C]], label [[BB1:%.*]], label [[BB2:%.*]]
229 ; CHECK-NEXT: br label [[BB1]]
231 ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4
232 ; CHECK-NEXT: br i1 [[C]], label [[BB2]], label [[BB0]]
234 ; CHECK-NEXT: ret i32 0
237 %v = load i32, ptr %p, align 4
238 br i1 %c, label %bb0, label %bb0.0
242 br i1 %c, label %bb1, label %bb2
248 store i32 %v, ptr %p, align 4
249 br i1 %c, label %bb2, label %bb0
254 define i32 @test47(i1 %c, ptr %p, i32 %i) {
255 ; CHECK-LABEL: @test47(
257 ; CHECK-NEXT: br label [[BB1:%.*]]
259 ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]]
261 ; CHECK-NEXT: br i1 [[C]], label [[BB3:%.*]], label [[BB1]]
263 ; CHECK-NEXT: ret i32 0
266 %v = load i32, ptr %p, align 4
269 store i32 %v, ptr %p, align 4
270 br i1 %c, label %bb1, label %bb2
272 store i32 %v, ptr %p, align 4
273 br i1 %c, label %bb3, label %bb1
278 ; Test case from PR47887.
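; Because %x and %y are noalias, the intervening store to %y cannot modify
; *%x, so storing %lv back to %x is still a noop.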
279 define void @test_noalias_store_between_load_and_store(ptr noalias %x, ptr noalias %y) {
280 ; CHECK-LABEL: @test_noalias_store_between_load_and_store(
282 ; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4
283 ; CHECK-NEXT: ret void
286 %lv = load i32, ptr %x, align 4
287 store i32 0, ptr %y, align 4
288 store i32 %lv, ptr %x, align 4
292 ; Test case from PR47887. No-op stores are eliminated before dead stores for
293 ; the same def; here both the dead `store i32 %inc, ptr %x` and the no-op
294 ; `store i32 %lv, ptr %x` end up removed.
295 define void @test_noalias_store_between_load_and_store_elimin_order(ptr noalias %x, ptr noalias %y) {
296 ; CHECK-LABEL: @test_noalias_store_between_load_and_store_elimin_order(
298 ; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4
299 ; CHECK-NEXT: ret void
302 %lv = load i32, ptr %x, align 4
303 %inc = add nsw i32 %lv, 1
304 store i32 %inc, ptr %x, align 4
305 store i32 0, ptr %y, align 4
306 store i32 %lv, ptr %x, align 4
310 declare noalias ptr @malloc(i64)
311 declare noalias ptr @_Znwm(i64)
312 declare void @clobber_memory(ptr)
314 ; based on pr25892_lite
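; A malloc followed by a memset that zeroes the whole allocation is folded
; into a single call to calloc, which returns zero-initialized memory.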
315 define ptr @zero_memset_after_malloc(i64 %size) {
316 ; CHECK-LABEL: @zero_memset_after_malloc(
317 ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
318 ; CHECK-NEXT: ret ptr [[CALLOC]]
320 %call = call ptr @malloc(i64 %size) inaccessiblememonly
321 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
325 ; based on pr25892_lite
326 define ptr @zero_memset_after_malloc_with_intermediate_clobbering(i64 %size) {
327 ; CHECK-LABEL: @zero_memset_after_malloc_with_intermediate_clobbering(
328 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7:[0-9]+]]
329 ; CHECK-NEXT: call void @clobber_memory(ptr [[CALL]])
330 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false)
331 ; CHECK-NEXT: ret ptr [[CALL]]
333 %call = call ptr @malloc(i64 %size) inaccessiblememonly
334 call void @clobber_memory(ptr %call)
335 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
339 ; based on pr25892_lite
340 define ptr @zero_memset_after_malloc_with_different_sizes(i64 %size) {
341 ; CHECK-LABEL: @zero_memset_after_malloc_with_different_sizes(
342 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7]]
343 ; CHECK-NEXT: [[SIZE2:%.*]] = add nsw i64 [[SIZE]], -1
344 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE2]], i1 false)
345 ; CHECK-NEXT: ret ptr [[CALL]]
347 %call = call ptr @malloc(i64 %size) inaccessiblememonly
348 %size2 = add nsw i64 %size, -1
349 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size2, i1 false)
353 ; based on pr25892_lite
354 define ptr @zero_memset_after_new(i64 %size) {
355 ; CHECK-LABEL: @zero_memset_after_new(
356 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 [[SIZE:%.*]])
357 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false)
358 ; CHECK-NEXT: ret ptr [[CALL]]
360 %call = call ptr @_Znwm(i64 %size)
361 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
365 ; This should not create a calloc and should not crash the compiler.
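; %notmalloc is an arbitrary indirect callee rather than a recognized
; allocation function, so the memset must stay and no calloc is formed.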
366 define ptr @notmalloc_memset(i64 %size, ptr %notmalloc) {
367 ; CHECK-LABEL: @notmalloc_memset(
368 ; CHECK-NEXT: [[CALL1:%.*]] = call ptr [[NOTMALLOC:%.*]](i64 [[SIZE:%.*]])
369 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL1]], i8 0, i64 [[SIZE]], i1 false)
370 ; CHECK-NEXT: ret ptr [[CALL1]]
372 %call1 = call ptr %notmalloc(i64 %size)
373 call void @llvm.memset.p0.i64(ptr %call1, i8 0, i64 %size, i1 false)
377 ; This should not create a recursive call to calloc.
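; This module defines @calloc itself, so folding the malloc + memset below
; into a call to @calloc would make the function call itself.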
378 define ptr @calloc(i64 %nmemb, i64 %size) inaccessiblememonly {
379 ; CHECK-LABEL: @calloc(
381 ; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[SIZE:%.*]], [[NMEMB:%.*]]
382 ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias align 16 ptr @malloc(i64 [[MUL]])
383 ; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq ptr [[CALL]], null
384 ; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
386 ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 16 [[CALL]], i8 0, i64 [[MUL]], i1 false)
387 ; CHECK-NEXT: br label [[IF_END]]
389 ; CHECK-NEXT: ret ptr [[CALL]]
392 %mul = mul i64 %size, %nmemb
393 %call = tail call noalias align 16 ptr @malloc(i64 %mul)
394 %tobool.not = icmp eq ptr %call, null
395 br i1 %tobool.not, label %if.end, label %if.then
397 if.then: ; preds = %entry
398 tail call void @llvm.memset.p0.i64(ptr nonnull align 16 %call, i8 0, i64 %mul, i1 false)
401 if.end: ; preds = %if.then, %entry
405 define ptr @pr25892(i64 %size) {
406 ; CHECK-LABEL: @pr25892(
408 ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
409 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null
410 ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
412 ; CHECK-NEXT: br label [[CLEANUP]]
414 ; CHECK-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
415 ; CHECK-NEXT: ret ptr [[RETVAL_0]]
418 %call = call ptr @malloc(i64 %size) inaccessiblememonly
419 %cmp = icmp eq ptr %call, null
420 br i1 %cmp, label %cleanup, label %if.end
422 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
425 %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ]
429 define ptr @pr25892_with_extra_store(i64 %size) {
430 ; CHECK-LABEL: @pr25892_with_extra_store(
432 ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]])
433 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null
434 ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
436 ; CHECK-NEXT: br label [[CLEANUP]]
438 ; CHECK-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
439 ; CHECK-NEXT: ret ptr [[RETVAL_0]]
442 %call = call ptr @malloc(i64 %size) inaccessiblememonly
443 %cmp = icmp eq ptr %call, null
444 br i1 %cmp, label %cleanup, label %if.end
446 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false)
447 store i8 0, ptr %call, align 1
450 %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ]
454 ; This should not create a calloc
455 define ptr @malloc_with_no_nointer_null_check(i64 %0, i32 %1) {
456 ; CHECK-LABEL: @malloc_with_no_nointer_null_check(
458 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[TMP0:%.*]]) #[[ATTR7]]
459 ; CHECK-NEXT: [[A:%.*]] = and i32 [[TMP1:%.*]], 32
460 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
461 ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
463 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[TMP0]], i1 false)
464 ; CHECK-NEXT: br label [[CLEANUP]]
466 ; CHECK-NEXT: ret ptr [[CALL]]
469 %call = call ptr @malloc(i64 %0) inaccessiblememonly
471 %cmp = icmp eq i32 %a, 0
472 br i1 %cmp, label %cleanup, label %if.end
474 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %0, i1 false)
481 define ptr @store_zero_after_calloc_inaccessiblememonly() {
482 ; CHECK-LABEL: @store_zero_after_calloc_inaccessiblememonly(
483 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 1, i64 10) #[[ATTR7]]
484 ; CHECK-NEXT: ret ptr [[CALL]]
486 %call = tail call ptr @calloc(i64 1, i64 10) inaccessiblememonly
487 store i8 0, ptr %call
491 define ptr @zero_memset_after_calloc() {
492 ; CHECK-LABEL: @zero_memset_after_calloc(
493 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
494 ; CHECK-NEXT: ret ptr [[CALL]]
496 %call = tail call ptr @calloc(i64 10000, i64 4)
497 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
501 define ptr @volatile_zero_memset_after_calloc() {
502 ; CHECK-LABEL: @volatile_zero_memset_after_calloc(
503 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
504 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 40000, i1 true)
505 ; CHECK-NEXT: ret ptr [[CALL]]
507 %call = tail call ptr @calloc(i64 10000, i64 4)
508 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 true)
512 define ptr @zero_memset_and_store_after_calloc(i8 %v) {
513 ; CHECK-LABEL: @zero_memset_and_store_after_calloc(
514 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
515 ; CHECK-NEXT: ret ptr [[CALL]]
517 %call = tail call ptr @calloc(i64 10000, i64 4)
518 store i8 %v, ptr %call
519 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
523 define ptr @partial_zero_memset_after_calloc() {
524 ; CHECK-LABEL: @partial_zero_memset_after_calloc(
525 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
526 ; CHECK-NEXT: ret ptr [[CALL]]
528 %call = tail call ptr @calloc(i64 10000, i64 4)
529 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
533 define ptr @partial_zero_memset_and_store_after_calloc(i8 %v) {
534 ; CHECK-LABEL: @partial_zero_memset_and_store_after_calloc(
535 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
536 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 30
537 ; CHECK-NEXT: store i8 [[V:%.*]], ptr [[GEP]], align 1
538 ; CHECK-NEXT: ret ptr [[CALL]]
540 %call = tail call ptr @calloc(i64 10000, i64 4)
541 %gep = getelementptr inbounds i8, ptr %call, i64 30
542 store i8 %v, ptr %gep
543 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
547 define ptr @zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx) {
548 ; CHECK-LABEL: @zero_memset_and_store_with_dyn_index_after_calloc(
549 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
550 ; CHECK-NEXT: ret ptr [[CALL]]
552 %call = tail call ptr @calloc(i64 10000, i64 4)
553 %gep = getelementptr inbounds i8, ptr %call, i64 %idx
554 store i8 %v, ptr %gep
555 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
559 define ptr @partial_zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx) {
560 ; CHECK-LABEL: @partial_zero_memset_and_store_with_dyn_index_after_calloc(
561 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
562 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 [[IDX:%.*]]
563 ; CHECK-NEXT: store i8 [[V:%.*]], ptr [[GEP]], align 1
564 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 20, i1 false)
565 ; CHECK-NEXT: ret ptr [[CALL]]
567 %call = tail call ptr @calloc(i64 10000, i64 4)
568 %gep = getelementptr inbounds i8, ptr %call, i64 %idx
569 store i8 %v, ptr %gep
570 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false)
574 define ptr @zero_memset_after_calloc_inaccessiblememonly() {
575 ; CHECK-LABEL: @zero_memset_after_calloc_inaccessiblememonly(
576 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) #[[ATTR7]]
577 ; CHECK-NEXT: ret ptr [[CALL]]
579 %call = tail call ptr @calloc(i64 10000, i64 4) inaccessiblememonly
580 call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false)
584 define ptr @cst_nonzero_memset_after_calloc() {
585 ; CHECK-LABEL: @cst_nonzero_memset_after_calloc(
586 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
587 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 1, i64 40000, i1 false)
588 ; CHECK-NEXT: ret ptr [[CALL]]
590 %call = tail call ptr @calloc(i64 10000, i64 4)
591 call void @llvm.memset.p0.i64(ptr %call, i8 1, i64 40000, i1 false)
595 define ptr @nonzero_memset_after_calloc(i8 %v) {
596 ; CHECK-LABEL: @nonzero_memset_after_calloc(
597 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
598 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 [[V:%.*]], i64 40000, i1 false)
599 ; CHECK-NEXT: ret ptr [[CALL]]
601 %call = tail call ptr @calloc(i64 10000, i64 4)
602 call void @llvm.memset.p0.i64(ptr %call, i8 %v, i64 40000, i1 false)
607 ; The first memset is dead, because calloc provides zero-filled memory.
608 ; TODO: This could be replaced with a call to malloc + memset_pattern16.
609 define ptr @memset_pattern16_after_calloc(ptr %pat) {
610 ; CHECK-LABEL: @memset_pattern16_after_calloc(
611 ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4)
612 ; CHECK-NEXT: call void @memset_pattern16(ptr [[CALL]], ptr [[PAT:%.*]], i64 40000)
613 ; CHECK-NEXT: ret ptr [[CALL]]
615 %call = tail call ptr @calloc(i64 10000, i64 4) #1
616 call void @llvm.memset.p0.i64(ptr align 4 %call, i8 0, i64 40000, i1 false)
617 call void @memset_pattern16(ptr %call, ptr %pat, i64 40000) #1
621 @n = global i32 0, align 4
622 @a = external global i32, align 4
623 @b = external global ptr, align 8
625 ; GCC calloc-1.c test case should create calloc
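; The malloc'd memory is a fresh noalias allocation, so the stores through %0
; and to @a between the malloc and the memset cannot clobber it, and the
; malloc + zero memset pair can still be rewritten as a calloc.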
626 define ptr @test_malloc_memset_to_calloc(ptr %0) {
627 ; CHECK-LABEL: @test_malloc_memset_to_calloc(
629 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @n, align 4
630 ; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
631 ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[TMP2]])
632 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP0:%.*]], align 8
633 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], 1
634 ; CHECK-NEXT: store i64 [[TMP4]], ptr [[TMP0]], align 8
635 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq ptr [[CALLOC]], null
636 ; CHECK-NEXT: br i1 [[TMP5]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
638 ; CHECK-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP3]], 2
639 ; CHECK-NEXT: store i64 [[TMP6]], ptr [[TMP0]], align 8
640 ; CHECK-NEXT: store i32 2, ptr @a, align 4
641 ; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr @b, align 8
642 ; CHECK-NEXT: store i32 3, ptr [[TMP7]], align 4
643 ; CHECK-NEXT: br label [[IF_END]]
645 ; CHECK-NEXT: ret ptr [[CALLOC]]
648 %1 = load i32, ptr @n, align 4
649 %2 = sext i32 %1 to i64
650 %3 = tail call ptr @malloc(i64 %2) inaccessiblememonly
651 %4 = load i64, ptr %0, align 8
652 %5 = add nsw i64 %4, 1
653 store i64 %5, ptr %0, align 8
654 %6 = icmp eq ptr %3, null
655 br i1 %6, label %if.end, label %if.then
658 %7 = add nsw i64 %4, 2
659 store i64 %7, ptr %0, align 8
660 store i32 2, ptr @a, align 4
661 tail call void @llvm.memset.p0.i64(ptr align 4 %3, i8 0, i64 %2, i1 false)
662 %8 = load ptr, ptr @b, align 8
663 store i32 3, ptr %8, align 4
670 define ptr @readnone_malloc() {
671 ; CHECK-LABEL: @readnone_malloc(
672 ; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16) #[[ATTR8:[0-9]+]]
673 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[ALLOC]], i8 0, i64 16, i1 false)
674 ; CHECK-NEXT: ret ptr [[ALLOC]]
676 %alloc = call ptr @malloc(i64 16) memory(none)
677 call void @llvm.memset.p0.i64(ptr %alloc, i8 0, i64 16, i1 false)
681 define void @store_same_i32_to_mayalias_loc(ptr %q, ptr %p) {
682 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc(
683 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
684 ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 4
685 ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4
686 ; CHECK-NEXT: ret void
688 %v = load i32, ptr %p, align 4
689 store i32 %v, ptr %q, align 4
690 store i32 %v, ptr %p, align 4
694 define void @store_same_i32_to_mayalias_loc_unalign(ptr %q, ptr %p) {
695 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_unalign(
696 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 1
697 ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 1
698 ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 1
699 ; CHECK-NEXT: ret void
701 %v = load i32, ptr %p, align 1
702 store i32 %v, ptr %q, align 1
703 store i32 %v, ptr %p, align 1
707 define void @store_same_i12_to_mayalias_loc(ptr %q, ptr %p) {
708 ; CHECK-LABEL: @store_same_i12_to_mayalias_loc(
709 ; CHECK-NEXT: [[V:%.*]] = load i12, ptr [[P:%.*]], align 2
710 ; CHECK-NEXT: store i12 [[V]], ptr [[Q:%.*]], align 2
711 ; CHECK-NEXT: store i12 [[V]], ptr [[P]], align 2
712 ; CHECK-NEXT: ret void
714 %v = load i12, ptr %p, align 2
715 store i12 %v, ptr %q, align 2
716 store i12 %v, ptr %p, align 2
720 define void @store_same_i12_to_mayalias_loc_unalign(ptr %q, ptr %p) {
721 ; CHECK-LABEL: @store_same_i12_to_mayalias_loc_unalign(
722 ; CHECK-NEXT: [[V:%.*]] = load i12, ptr [[P:%.*]], align 1
723 ; CHECK-NEXT: store i12 [[V]], ptr [[Q:%.*]], align 1
724 ; CHECK-NEXT: store i12 [[V]], ptr [[P]], align 1
725 ; CHECK-NEXT: ret void
727 %v = load i12, ptr %p, align 1
728 store i12 %v, ptr %q, align 1
729 store i12 %v, ptr %p, align 1
733 define void @store_same_ptr_to_mayalias_loc(ptr %q, ptr %p) {
734 ; CHECK-LABEL: @store_same_ptr_to_mayalias_loc(
735 ; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[P:%.*]], align 8
736 ; CHECK-NEXT: store ptr [[V]], ptr [[Q:%.*]], align 8
737 ; CHECK-NEXT: store ptr [[V]], ptr [[P]], align 8
738 ; CHECK-NEXT: ret void
740 %v = load ptr, ptr %p, align 8
741 store ptr %v, ptr %q, align 8
742 store ptr %v, ptr %p, align 8
746 define void @store_same_scalable_to_mayalias_loc(ptr %q, ptr %p) {
747 ; CHECK-LABEL: @store_same_scalable_to_mayalias_loc(
748 ; CHECK-NEXT: [[V:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 4
749 ; CHECK-NEXT: store <vscale x 4 x i32> [[V]], ptr [[Q:%.*]], align 4
750 ; CHECK-NEXT: store <vscale x 4 x i32> [[V]], ptr [[P]], align 4
751 ; CHECK-NEXT: ret void
753 %v = load <vscale x 4 x i32>, ptr %p, align 4
754 store <vscale x 4 x i32> %v, ptr %q, align 4
755 store <vscale x 4 x i32> %v, ptr %p, align 4
759 define void @store_same_i32_to_mayalias_loc_inconsistent_align(ptr %q, ptr %p) {
760 ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_inconsistent_align(
761 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 2
762 ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 4
763 ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4
764 ; CHECK-NEXT: ret void
766 %v = load i32, ptr %p, align 2
767 store i32 %v, ptr %q, align 4
768 store i32 %v, ptr %p, align 4
772 define void @do_not_crash_on_liveonentrydef(i1 %c, ptr %p, ptr noalias %q) {
773 ; CHECK-LABEL: @do_not_crash_on_liveonentrydef(
775 ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
777 ; CHECK-NEXT: store i8 0, ptr [[Q:%.*]], align 1
778 ; CHECK-NEXT: br label [[JOIN]]
780 ; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[Q]], align 1
781 ; CHECK-NEXT: store i8 0, ptr [[P:%.*]], align 1
782 ; CHECK-NEXT: store i8 [[V]], ptr [[Q]], align 1
783 ; CHECK-NEXT: ret void
786 br i1 %c, label %if, label %join
789 store i8 0, ptr %q, align 1
793 %v = load i8, ptr %q, align 1
794 store i8 0, ptr %p, align 1
795 store i8 %v, ptr %q, align 1
799 ; Dominating condition implies value already exists, optimize store
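; In the %if.eq block the dominating compare guarantees that *%x already holds
; 4, so storing 4 there again is a noop and can be removed.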
800 define void @remove_tautological_store_eq(ptr %x) {
801 ; CHECK-LABEL: @remove_tautological_store_eq(
803 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
804 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
805 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
807 ; CHECK-NEXT: br label [[END]]
809 ; CHECK-NEXT: ret void
812 %val = load i32, ptr %x, align 4
813 %cmp = icmp eq i32 %val, 4
814 br i1 %cmp, label %if.eq, label %end
817 store i32 4, ptr %x, align 4
824 ; Dominating condition implies value already exists, optimize store
825 define void @remove_tautological_store_var(ptr %x, ptr %y) {
826 ; CHECK-LABEL: @remove_tautological_store_var(
828 ; CHECK-NEXT: [[VALX:%.*]] = load i32, ptr [[X:%.*]], align 4
829 ; CHECK-NEXT: [[VALY:%.*]] = load i32, ptr [[Y:%.*]], align 4
830 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VALX]], [[VALY]]
831 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
833 ; CHECK-NEXT: br label [[END]]
835 ; CHECK-NEXT: ret void
838 %valx = load i32, ptr %x, align 4
839 %valy = load i32, ptr %y, align 4
840 %cmp = icmp eq i32 %valx, %valy
841 br i1 %cmp, label %if.eq, label %end
844 store i32 %valy, ptr %x, align 4
851 ; Dominating condition implies value already exists, optimize store
852 define void @remove_tautological_store_ne(ptr %x) {
853 ; CHECK-LABEL: @remove_tautological_store_ne(
855 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
856 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[VAL]], 4
857 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_NE:%.*]], label [[IF_ELSE:%.*]]
859 ; CHECK-NEXT: br label [[END:%.*]]
861 ; CHECK-NEXT: br label [[END]]
863 ; CHECK-NEXT: ret void
866 %val = load i32, ptr %x, align 4
867 %cmp = icmp ne i32 %val, 4
868 br i1 %cmp, label %if.ne, label %if.else
874 store i32 4, ptr %x, align 4
881 ; Dominating condition implies value already exists, optimize store
882 ; Optimizes unordered atomic stores
883 define void @remove_tautological_store_atomic_unordered(ptr %x) {
884 ; CHECK-LABEL: @remove_tautological_store_atomic_unordered(
886 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
887 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
888 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
890 ; CHECK-NEXT: br label [[END]]
892 ; CHECK-NEXT: ret void
895 %val = load i32, ptr %x, align 4
896 %cmp = icmp eq i32 %val, 4
897 br i1 %cmp, label %if.eq, label %end
900 store atomic i32 4, ptr %x unordered, align 4
907 ; Should not optimize ordered atomic stores
908 define void @remove_tautological_store_atomic_monotonic(ptr %x) {
909 ; CHECK-LABEL: @remove_tautological_store_atomic_monotonic(
911 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
912 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
913 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
915 ; CHECK-NEXT: store atomic i32 4, ptr [[X]] monotonic, align 4
916 ; CHECK-NEXT: br label [[END]]
918 ; CHECK-NEXT: ret void
921 %val = load i32, ptr %x, align 4
922 %cmp = icmp eq i32 %val, 4
923 br i1 %cmp, label %if.eq, label %end
926 store atomic i32 4, ptr %x monotonic, align 4
933 ; Should not optimize since the store is in the wrong branch
934 define void @remove_tautological_store_eq_wrong_branch(ptr %x, ptr %y) {
935 ; CHECK-LABEL: @remove_tautological_store_eq_wrong_branch(
937 ; CHECK-NEXT: [[VALX:%.*]] = load i32, ptr [[X:%.*]], align 4
938 ; CHECK-NEXT: [[VALY:%.*]] = load i32, ptr [[Y:%.*]], align 4
939 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VALX]], [[VALY]]
940 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
942 ; CHECK-NEXT: br label [[END]]
944 ; CHECK-NEXT: store i32 [[VALY]], ptr [[X]], align 4
945 ; CHECK-NEXT: ret void
948 %valx = load i32, ptr %x, align 4
949 %valy = load i32, ptr %y, align 4
950 %cmp = icmp eq i32 %valx, %valy
951 br i1 %cmp, label %if.eq, label %end
957 store i32 %valy, ptr %x, align 4
961 ; Should not optimize since the store is in the wrong branch
962 define void @remove_tautological_store_ne_wrong_branch(ptr %x) {
963 ; CHECK-LABEL: @remove_tautological_store_ne_wrong_branch(
965 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
966 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[VAL]], 4
967 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_NE:%.*]], label [[END:%.*]]
969 ; CHECK-NEXT: store i32 4, ptr [[X]], align 4
970 ; CHECK-NEXT: br label [[END]]
972 ; CHECK-NEXT: ret void
975 %val = load i32, ptr %x, align 4
976 %cmp = icmp ne i32 %val, 4
977 br i1 %cmp, label %if.ne, label %end
980 store i32 4, ptr %x, align 4
987 ; Should not optimize: both successors of the branch are the same block, so
988 ; the dominating condition does not guarantee what value *%x holds when the
989 ; store executes.
990 define void @remove_tautological_store_same_branch(ptr %x) {
991 ; CHECK-LABEL: @remove_tautological_store_same_branch(
993 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
994 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
995 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_EQ]]
997 ; CHECK-NEXT: store i32 4, ptr [[X]], align 4
998 ; CHECK-NEXT: ret void
1001 %val = load i32, ptr %x, align 4
1002 %cmp = icmp eq i32 %val, 4
1003 br i1 %cmp, label %if.eq, label %if.eq
1006 store i32 4, ptr %x, align 4
1010 ; Should not optimize since the value being stored (5) differs from the value
1011 ; established by the dominating condition (4).
1012 define void @remove_tautological_store_wrong_value(ptr %x) {
1013 ; CHECK-LABEL: @remove_tautological_store_wrong_value(
1014 ; CHECK-NEXT: entry:
1015 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
1016 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
1017 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
1019 ; CHECK-NEXT: store i32 5, ptr [[X]], align 4
1020 ; CHECK-NEXT: br label [[END]]
1022 ; CHECK-NEXT: ret void
1025 %val = load i32, ptr %x, align 4
1026 %cmp = icmp eq i32 %val, 4
1027 br i1 %cmp, label %if.eq, label %end
1030 store i32 5, ptr %x, align 4
1037 ; Should not optimize since there is a clobbering access after the load
1038 define void @remove_tautological_store_clobber(ptr %x) {
1039 ; CHECK-LABEL: @remove_tautological_store_clobber(
1040 ; CHECK-NEXT: entry:
1041 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
1042 ; CHECK-NEXT: store i32 5, ptr [[X]], align 4
1043 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
1044 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
1046 ; CHECK-NEXT: store i32 4, ptr [[X]], align 4
1047 ; CHECK-NEXT: br label [[END]]
1049 ; CHECK-NEXT: ret void
1052 %val = load i32, ptr %x, align 4
1053 store i32 5, ptr %x, align 4
1054 %cmp = icmp eq i32 %val, 4
1055 br i1 %cmp, label %if.eq, label %end
1058 store i32 4, ptr %x, align 4
1065 ; Should not optimize since the condition does not dominate the store
1066 define void @remove_tautological_store_no_dom(ptr %x) {
1067 ; CHECK-LABEL: @remove_tautological_store_no_dom(
1068 ; CHECK-NEXT: entry:
1069 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
1070 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
1071 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_ELSE:%.*]]
1073 ; CHECK-NEXT: br label [[END:%.*]]
1075 ; CHECK-NEXT: br label [[END]]
1077 ; CHECK-NEXT: store i32 4, ptr [[X]], align 4
1078 ; CHECK-NEXT: ret void
1081 %val = load i32, ptr %x, align 4
1082 store i32 5, ptr %x, align 4
1083 %cmp = icmp eq i32 %val, 4
1084 br i1 %cmp, label %if.eq, label %if.else
1093 store i32 4, ptr %x, align 4
1097 ; Should not optimize volatile stores
1098 define void @remove_tautological_store_volatile(ptr %x) {
1099 ; CHECK-LABEL: @remove_tautological_store_volatile(
1100 ; CHECK-NEXT: entry:
1101 ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
1102 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
1103 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
1105 ; CHECK-NEXT: store volatile i32 4, ptr [[X]], align 4
1106 ; CHECK-NEXT: br label [[END]]
1108 ; CHECK-NEXT: ret void
1111 %val = load i32, ptr %x, align 4
1112 %cmp = icmp eq i32 %val, 4
1113 br i1 %cmp, label %if.eq, label %end
1116 store volatile i32 4, ptr %x, align 4
1123 ; Should not optimize stores where the edge from the branch instruction to the
1124 ; conditional block does not dominate that block.
1125 ; (Here the conditional block post-dominates the branch instruction.)
1126 define void @remove_tautological_store_no_edge_domination(ptr %x) {
1127 ; CHECK-LABEL: @remove_tautological_store_no_edge_domination(
1128 ; CHECK-NEXT: entry:
1129 ; CHECK-NEXT: [[X1:%.*]] = load ptr, ptr [[X:%.*]], align 8
1130 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X1]], null
1131 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_ELSE:%.*]]
1133 ; CHECK-NEXT: store ptr null, ptr [[X]], align 8
1134 ; CHECK-NEXT: br label [[END:%.*]]
1136 ; CHECK-NEXT: br label [[IF_EQ]]
1138 ; CHECK-NEXT: ret void
1141 %x1 = load ptr, ptr %x, align 8
1142 %cmp = icmp eq ptr %x1, null
1143 br i1 %cmp, label %if.eq, label %if.else
1146 store ptr null, ptr %x, align 8