; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=memcpyopt -S < %s -verify-memoryssa | FileCheck %s

target datalayout = "e-i64:64-f80:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

%S = type { ptr, i8, i32 }
define void @copy(ptr %src, ptr %dst) {
; CHECK-LABEL: @copy(
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S %1, ptr %dst
  ret void
}
define void @noaliassrc(ptr noalias %src, ptr %dst) {
; CHECK-LABEL: @noaliassrc(
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S %1, ptr %dst
  ret void
}
define void @noaliasdst(ptr %src, ptr noalias %dst) {
; CHECK-LABEL: @noaliasdst(
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S %1, ptr %dst
  ret void
}
define void @destroysrc(ptr %src, ptr %dst) {
; CHECK-LABEL: @destroysrc(
; CHECK-NEXT:    [[TMP1:%.*]] = load [[S:%.*]], ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[SRC]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    store [[S]] [[TMP1]], ptr [[DST:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S zeroinitializer, ptr %src
  store %S %1, ptr %dst
  ret void
}
define void @destroynoaliassrc(ptr noalias %src, ptr %dst) {
; CHECK-LABEL: @destroynoaliassrc(
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[SRC]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S zeroinitializer, ptr %src
  store %S %1, ptr %dst
  ret void
}
define void @copyalias(ptr %src, ptr %dst) {
; CHECK-LABEL: @copyalias(
; CHECK-NEXT:    [[TMP1:%.*]] = load [[S:%.*]], ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC]], i64 16, i1 false)
; CHECK-NEXT:    store [[S]] [[TMP1]], ptr [[DST]], align 8
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  %2 = load %S, ptr %src
  store %S %1, ptr %dst
  store %S %2, ptr %dst
  ret void
}
; If the store address is computed in a complex manner, make
; sure we lift the computation as well if needed and possible.
define void @addrproducer(ptr %src, ptr %dst) {
; CHECK-LABEL: @addrproducer(
; CHECK-NEXT:    [[DST2:%.*]] = getelementptr [[S:%.*]], ptr [[DST:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 8 [[DST2]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[DST]], i8 undef, i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S undef, ptr %dst
  %dst2 = getelementptr %S, ptr %dst, i64 1
  store %S %1, ptr %dst2
  ret void
}
define void @aliasaddrproducer(ptr %src, ptr %dst, ptr %dstidptr) {
; CHECK-LABEL: @aliasaddrproducer(
; CHECK-NEXT:    [[TMP1:%.*]] = load [[S:%.*]], ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[DST:%.*]], i8 undef, i64 16, i1 false)
; CHECK-NEXT:    [[DSTINDEX:%.*]] = load i32, ptr [[DSTIDPTR:%.*]], align 4
; CHECK-NEXT:    [[DST2:%.*]] = getelementptr [[S]], ptr [[DST]], i32 [[DSTINDEX]]
; CHECK-NEXT:    store [[S]] [[TMP1]], ptr [[DST2]], align 8
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S undef, ptr %dst
  %dstindex = load i32, ptr %dstidptr
  %dst2 = getelementptr %S, ptr %dst, i32 %dstindex
  store %S %1, ptr %dst2
  ret void
}
define void @noaliasaddrproducer(ptr %src, ptr noalias %dst, ptr noalias %dstidptr) {
; CHECK-LABEL: @noaliasaddrproducer(
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DSTIDPTR:%.*]], align 4
; CHECK-NEXT:    [[DSTINDEX:%.*]] = or i32 [[TMP2]], 1
; CHECK-NEXT:    [[DST2:%.*]] = getelementptr [[S:%.*]], ptr [[DST:%.*]], i32 [[DSTINDEX]]
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST2]], ptr align 8 [[SRC:%.*]], i64 16, i1 false)
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[SRC]], i8 undef, i64 16, i1 false)
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S undef, ptr %src
  %2 = load i32, ptr %dstidptr
  %dstindex = or i32 %2, 1
  %dst2 = getelementptr %S, ptr %dst, i32 %dstindex
  store %S %1, ptr %dst2
  ret void
}
define void @throwing_call(ptr noalias %src, ptr %dst) {
; CHECK-LABEL: @throwing_call(
; CHECK-NEXT:    [[TMP1:%.*]] = load [[S:%.*]], ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[SRC]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    call void @call() [[ATTR2:#.*]]
; CHECK-NEXT:    store [[S]] [[TMP1]], ptr [[DST:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %1 = load %S, ptr %src
  store %S zeroinitializer, ptr %src
  call void @call() readnone
  store %S %1, ptr %dst
  ret void
}
define void @loop_memoryphi(ptr %a, ptr %b) {
; CHECK-LABEL: @loop_memoryphi(
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 8 [[B:%.*]], ptr align 8 [[A:%.*]], i64 16, i1 false)
; CHECK-NEXT:    br label [[LOOP]]
;
  br label %loop

loop:
  %v = load { i64, i64 }, ptr %a
  store { i64, i64 } %v, ptr %b
  br label %loop
}

declare void @call()