; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
declare void @llvm.init.trampoline(i8*, i8*, i8*)

;; Overwrite of memset by memcpy.
define void @test17(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

;; Overwrite of memset by memcpy.
define void @test17_atomic(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Overwrite of memset by memcpy. Overwrite is stronger atomicity. We can
;; remove the memset.
define void @test17_atomic_weaker(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic_weaker(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i1 false)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Overwrite of memset by memcpy. Overwrite is weaker atomicity. We can remove
;; the memset.
define void @test17_atomic_weaker_2(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic_weaker_2(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  ret void
}

; Should not delete the volatile memset.
define void @test17v(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test17v(
; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[P:%.*]], i8 42, i64 8, i1 true)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 true)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

; See PR11763 - LLVM allows memcpy's source and destination to be equal (but not
; unequal and overlapping).
define void @test18(i8* %P, i8* %Q, i8* %R) nounwind ssp {
; CHECK-LABEL: @test18(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
  ret void
}
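
;; Same as @test18, but using element-wise unordered-atomic memcpys; both
;; calls are kept.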
define void @test18_atomic(i8* %P, i8* %Q, i8* %R) nounwind ssp {
; CHECK-LABEL: @test18_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
  ret void
}
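
;; The plain memset is fully overwritten by the 12-byte memcpy.inline to the
;; same noalias destination, so it can be removed.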
define void @test_memset_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
; CHECK-LABEL: @test_memset_memcpy_inline(
; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  ret void
}
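
;; Stores into the first 4 bytes of %P are covered by the 4-byte memcpy.inline
;; and can be removed; the store at offset 4 is outside it and must remain.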
define void @test_store_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
; CHECK-LABEL: @test_store_memcpy_inline(
; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 4
; CHECK-NEXT:    store i8 4, i8* [[P_4]], align 1
; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[Q:%.*]], i64 4, i1 false)
; CHECK-NEXT:    ret void
;
  store i8 0, i8* %P
  %P.1 = getelementptr i8, i8* %P, i64 1
  store i8 1, i8* %P.1
  %P.4 = getelementptr i8, i8* %P, i64 4
  store i8 4, i8* %P.4
  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 4, i1 false)
  ret void
}

declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)