1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -aa-pipeline=basic-aa -passes='require<memoryssa>,memcpyopt' -verify-memoryssa -S %s | FileCheck %s
; x86-64 macOS layout: 64-bit pointers (except the 32-bit address spaces
; 270/271), so each ptr field of %t below occupies 8 bytes.
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.15.0"
; Packed struct (<{...}> = no implicit padding): 8 + 4 + 8 + 8 + 4 + 8192
; = 8224 bytes total — matches the 8224-byte memcpy/memset expected in @test8.
%t = type <{ ptr, [4 x i8], ptr, ptr, i32, [8192 x i8] }>
12 define i32 @test1(ptr %ptr) {
13 ; CHECK-LABEL: @test1(
14 ; CHECK-NEXT: invoke.cont6:
15 ; CHECK-NEXT: [[P_2:%.*]] = getelementptr inbounds [[T:%.*]], ptr [[PTR:%.*]], i64 0, i32 4
16 ; CHECK-NEXT: [[P_3:%.*]] = getelementptr inbounds [[T]], ptr [[PTR]], i64 0, i32 5, i64 0
17 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[PTR]], i8 0, i64 20, i1 false)
18 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[P_2]], i8 0, i64 8195, i1 false)
19 ; CHECK-NEXT: ret i32 0
22 call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 20, i1 false)
23 store ptr null, ptr %ptr, align 8
24 %p.2 = getelementptr inbounds %t, ptr %ptr, i64 0, i32 4
25 store i32 0, ptr %p.2, align 8
26 %p.3 = getelementptr inbounds %t, ptr %ptr, i64 0, i32 5, i64 0
27 call void @llvm.memset.p0.i64(ptr %p.3, i8 0, i64 8191, i1 false)
; Opaque external allocator-like call used by @test2; returns a pointer whose
; fresh memset can be dropped once the following memcpy overwrites all 10 bytes.
declare ptr @get_ptr()
33 define void @test2(ptr noalias %in) {
34 ; CHECK-LABEL: @test2(
36 ; CHECK-NEXT: [[CALL_I1_I:%.*]] = tail call ptr @get_ptr()
37 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[CALL_I1_I]], ptr [[IN:%.*]], i64 10, i1 false)
38 ; CHECK-NEXT: ret void
41 %call.i1.i = tail call ptr @get_ptr()
42 tail call void @llvm.memset.p0.i64(ptr %call.i1.i, i8 0, i64 10, i1 false)
43 tail call void @llvm.memcpy.p0.p0.i64(ptr %call.i1.i, ptr %in, i64 10, i1 false)
; Recognized allocation function: in @test3 it lets memcpyopt turn the memmove
; from a noalias source into a memcpy (freshly allocated memory cannot overlap).
declare ptr @malloc(i64)
49 define i32 @test3(ptr noalias %in) {
50 ; CHECK-LABEL: @test3(
51 ; CHECK-NEXT: [[CALL_I_I_I:%.*]] = tail call ptr @malloc(i64 20)
52 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[CALL_I_I_I]], ptr [[IN:%.*]], i64 20, i1 false)
53 ; CHECK-NEXT: ret i32 10
55 %call.i.i.i = tail call ptr @malloc(i64 20)
56 tail call void @llvm.memmove.p0.p0.i64(ptr %call.i.i.i, ptr %in, i64 20, i1 false)
60 define void @test4(i32 %n, ptr noalias %ptr.0, ptr noalias %ptr.1, ptr %ptr.2) unnamed_addr {
61 ; CHECK-LABEL: @test4(
62 ; CHECK-NEXT: [[ELEM_I:%.*]] = getelementptr i8, ptr [[PTR_0:%.*]], i64 8
63 ; CHECK-NEXT: store i32 [[N:%.*]], ptr [[PTR_2:%.*]], align 8
64 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[ELEM_I]], ptr [[PTR_1:%.*]], i64 10, i1 false)
65 ; CHECK-NEXT: ret void
67 %elem.i = getelementptr i8, ptr %ptr.0, i64 8
68 call void @llvm.memset.p0.i64(ptr %elem.i, i8 0, i64 10, i1 false)
69 store i32 %n, ptr %ptr.2, align 8
70 call void @llvm.memcpy.p0.p0.i64(ptr %elem.i, ptr %ptr.1, i64 10, i1 false)
; External writer for @test5: initializes %tmp (nocapture, no other side
; effects visible here), after which the memcpy into %early_data must stay.
declare void @decompose(ptr nocapture)
76 define void @test5(ptr %ptr) {
77 ; CHECK-LABEL: @test5(
79 ; CHECK-NEXT: [[EARLY_DATA:%.*]] = alloca [128 x i8], align 8
80 ; CHECK-NEXT: [[TMP:%.*]] = alloca [[T:%.*]], align 8
81 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[EARLY_DATA]])
82 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[PTR:%.*]], align 8
83 ; CHECK-NEXT: call fastcc void @decompose(ptr [[TMP]])
84 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[EARLY_DATA]], ptr [[TMP]], i64 32, i1 false)
85 ; CHECK-NEXT: ret void
88 %early_data = alloca [128 x i8], align 8
89 %tmp = alloca %t, align 8
90 call void @llvm.lifetime.start.p0(i64 32, ptr %early_data)
91 %0 = load i32, ptr %ptr, align 8
92 call fastcc void @decompose(ptr %tmp)
93 call void @llvm.memcpy.p0.p0.i64(ptr %early_data, ptr %tmp, i64 32, i1 false)
97 define i8 @test6(ptr %ptr, ptr noalias %ptr.1) {
98 ; CHECK-LABEL: @test6(
100 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr [[PTR:%.*]])
101 ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PTR]], align 8
102 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[PTR]], ptr [[PTR_1:%.*]], i64 24, i1 false)
103 ; CHECK-NEXT: ret i8 [[TMP0]]
106 call void @llvm.lifetime.start.p0(i64 24, ptr %ptr)
107 %0 = load i8, ptr %ptr, align 8
108 call void @llvm.memmove.p0.p0.i64(ptr %ptr, ptr %ptr.1, i64 24, i1 false)
112 define void @test7(ptr %ptr) {
113 ; CHECK-LABEL: @test7(
115 ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [4 x i32], ptr [[PTR:%.*]], i64 0, i32 1
116 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[PTR]], i64 0, i32 2
117 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[PTR]], i64 0, i32 3
118 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[PTR]], i8 0, i64 16, i1 false)
119 ; CHECK-NEXT: call void @clobber()
120 ; CHECK-NEXT: ret void
123 store i32 0, ptr %ptr, align 1
124 %0 = getelementptr inbounds [4 x i32], ptr %ptr, i64 0, i32 1
125 store i32 0, ptr %0, align 1
126 %1 = getelementptr inbounds [4 x i32], ptr %ptr, i64 0, i32 2
127 store i32 0, ptr %1, align 1
128 %2 = getelementptr inbounds [4 x i32], ptr %ptr, i64 0, i32 3
129 store i32 0, ptr %2, align 1
134 define void @test8(ptr noalias %src, ptr %dst) {
135 ; CHECK-LABEL: @test8(
136 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DST:%.*]], ptr align 1 [[SRC:%.*]], i64 8224, i1 false)
137 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[SRC]], i8 0, i64 8224, i1 false)
138 ; CHECK-NEXT: ret void
140 %1 = load %t, ptr %src
141 store %t zeroinitializer, ptr %src
142 store %t %1, ptr %dst
; Generic opaque call; expected to survive after the stores in @test7 are
; merged into a single memset (see @test7's CHECK lines).
declare void @clobber()
; Function Attrs: argmemonly nounwind willreturn
; Marks the beginning of the live range of an alloca (used in @test5/@test6);
; memcpyopt must preserve it while moving/eliding the memory transfers.
declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
; Function Attrs: argmemonly nounwind willreturn
; Non-overlapping copy (noalias on both pointer args) — the canonical form
; memcpyopt rewrites memmove/memset pairs into throughout these tests.
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
; Function Attrs: argmemonly nounwind willreturn writeonly
; Byte-fill intrinsic; @test1 and @test7 check that adjacent stores/memsets
; are widened into fewer, larger memsets.
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
157 ; Function Attrs: argmemonly nounwind willreturn
158 declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1 immarg) #0
; #0: reads/writes only through pointer args; #1 additionally never reads memory.
attributes #0 = { argmemonly nounwind willreturn }
attributes #1 = { argmemonly nounwind willreturn writeonly }