; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=memcpyopt -S %s -verify-memoryssa | FileCheck %s
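
; These tests check that memcpyopt removes a memmove whose source and
; destination both lie inside a region that a dominating memset has already
; zero-filled (the move copies zeros over zeros), and keeps the memmove when
; the zeroed bytes may have been modified or the memset does not cover the
; moved range.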

; Redundant memmove.
define i32 @redundant_memmove() {
; CHECK-LABEL: @redundant_memmove(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 104, i1 false)
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY]], align 16
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 104, i1 false)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 100, i1 false)
  %val = load i32, ptr %array, align 16
  ret i32 %val
}

; Used memmove, buffer is reset to zero but then modified by a store before the memmove.
define i32 @used_memmove_1() {
; CHECK-LABEL: @used_memmove_1(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 104, i1 false)
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    store i32 1, ptr [[ARRAY_IDX]], align 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY]], ptr align 4 [[ARRAY_IDX]], i64 100, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY_IDX]], align 4
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 104, i1 false)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  store i32 1, ptr %array.idx
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 100, i1 false)
  %val = load i32, ptr %array.idx, align 4
  ret i32 %val
}

; Used memmove, a store to the destination would be overwritten by the memmove.
define i32 @used_memmove_2() {
; CHECK-LABEL: @used_memmove_2(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 104, i1 false)
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    store i32 1, ptr [[ARRAY]], align 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY]], ptr align 4 [[ARRAY_IDX]], i64 100, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY_IDX]], align 4
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 104, i1 false)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  store i32 1, ptr %array
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 100, i1 false)
  %val = load i32, ptr %array.idx, align 4
  ret i32 %val
}

; Used memmove, buffer clobbered by opaque call.
define i32 @used_memmove_3() {
; CHECK-LABEL: @used_memmove_3(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [25 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 100, i1 false)
; CHECK-NEXT:    call void @opaque(ptr [[ARRAY]])
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY]], ptr align 4 [[ARRAY_IDX]], i64 96, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY]], align 16
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [25 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 100, i1 false)
  call void @opaque(ptr %array)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 96, i1 false)
  %val = load i32, ptr %array, align 16
  ret i32 %val
}

; Redundant memmove, not within the same basic block.
define i32 @redundant_memmove_different_bbs() {
; CHECK-LABEL: @redundant_memmove_different_bbs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 104, i1 false)
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    br label [[USE:%.*]]
; CHECK:       use:
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY]], align 16
; CHECK-NEXT:    ret i32 [[VAL]]
;
entry:
  %array = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 104, i1 false)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  br label %use

use:
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 100, i1 false)
  %val = load i32, ptr %array, align 16
  ret i32 %val
}

@g_var = global [26 x i32] zeroinitializer, align 16

; Redundant memmove on a global variable.
define ptr @redundant_memmove_memset_global_variable() {
; CHECK-LABEL: @redundant_memmove_memset_global_variable(
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 @g_var, i8 0, i64 104, i1 false)
; CHECK-NEXT:    ret ptr @g_var
;
  call void @llvm.memset.p0.i64(ptr align 16 @g_var, i8 0, i64 104, i1 false)
  call void @llvm.memmove.p0.p0.i64(ptr align 16 @g_var, ptr align 4 getelementptr inbounds nuw (i8, ptr @g_var, i64 4), i64 100, i1 false)
  ret ptr @g_var
}

; Memset is only partial; it does not cover the full range the memmove copies.
define i32 @partial_memset() {
; CHECK-LABEL: @partial_memset(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 92
; CHECK-NEXT:    store i32 1, ptr [[ARRAY_IDX]], align 4
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 26, i1 false)
; CHECK-NEXT:    [[ARRAY_IDX_2:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY]], ptr align 4 [[ARRAY_IDX_2]], i64 100, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY_IDX]], align 4
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  %array.idx = getelementptr inbounds i8, ptr %array, i64 92
  store i32 1, ptr %array.idx
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 26, i1 false)
  %array.idx.2 = getelementptr inbounds i8, ptr %array, i64 4
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx.2, i64 100, i1 false)
  %val = load i32, ptr %array.idx, align 4
  ret i32 %val
}

; Memset length is not a constant.
define i32 @memset_length_not_constant(i64 %size) {
; CHECK-LABEL: @memset_length_not_constant(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT:    [[ARRAY_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY]], i64 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY]], ptr align 4 [[ARRAY_IDX]], i64 100, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY]], align 16
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 %size, i1 false)
  %array.idx = getelementptr inbounds i8, ptr %array, i64 4
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array, ptr align 4 %array.idx, i64 100, i1 false)
  %val = load i32, ptr %array, align 16
  ret i32 %val
}

; Memmove'd buffer was not memset; the memset and memmove use different buffers.
define i32 @memset_memmove_dest_buffers_not_alias() {
; CHECK-LABEL: @memset_memmove_dest_buffers_not_alias(
; CHECK-NEXT:    [[ARRAY:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    [[ARRAY2:%.*]] = alloca [26 x i32], align 16
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[ARRAY]], i8 0, i64 104, i1 false)
; CHECK-NEXT:    [[ARRAY2_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARRAY2]], i64 4
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[ARRAY2]], ptr align 4 [[ARRAY2_IDX]], i64 100, i1 false)
; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ARRAY2]], align 16
; CHECK-NEXT:    ret i32 [[VAL]]
;
  %array = alloca [26 x i32], align 16
  %array2 = alloca [26 x i32], align 16
  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 104, i1 false)
  %array2.idx = getelementptr inbounds i8, ptr %array2, i64 4
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %array2, ptr align 4 %array2.idx, i64 100, i1 false)
  %val = load i32, ptr %array2, align 16
  ret i32 %val
}

declare void @opaque(ptr)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)