; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -memcpyopt -dse -S -enable-memcpyopt-memoryssa=0 | FileCheck %s --check-prefixes=CHECK,NO_MSSA
; RUN: opt < %s -basic-aa -memcpyopt -dse -S -enable-memcpyopt-memoryssa=1 -verify-memoryssa | FileCheck %s --check-prefixes=CHECK,MSSA

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }

@C = external constant [0 x i8]

declare void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind

; Check that one of the memcpys is removed.
;; FIXME: PR8643 - we should be able to eliminate the last memcpy here.
define void @test1(%0* sret(%0) %agg.result, x86_fp80 %z.0, x86_fp80 %z.1) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP2:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT: [[MEMTMP:%.*]] = alloca [[TMP0]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = fsub x86_fp80 0xK80000000000000000000, [[Z_1:%.*]]
; CHECK-NEXT: call void @ccoshl(%0* sret([[TMP0]]) [[TMP2]], x86_fp80 [[TMP5]], x86_fp80 [[Z_0:%.*]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: [[TMP219:%.*]] = bitcast %0* [[TMP2]] to i8*
; CHECK-NEXT: [[MEMTMP20:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; CHECK-NEXT: [[AGG_RESULT21:%.*]] = bitcast %0* [[AGG_RESULT:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[AGG_RESULT21]], i8* align 16 [[TMP219]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
entry:
  %tmp2 = alloca %0, align 16
  %memtmp = alloca %0, align 16
  %tmp5 = fsub x86_fp80 0xK80000000000000000000, %z.1
  call void @ccoshl(%0* sret(%0) %memtmp, x86_fp80 %tmp5, x86_fp80 %z.0) nounwind
  %tmp219 = bitcast %0* %tmp2 to i8*
  %memtmp20 = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %tmp219, i8* align 16 %memtmp20, i32 32, i1 false)
  %agg.result21 = bitcast %0* %agg.result to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %agg.result21, i8* align 16 %tmp219, i32 32, i1 false)
  ret void
}

declare void @ccoshl(%0* nocapture sret(%0), x86_fp80, x86_fp80) nounwind

; The intermediate alloca and one of the memcpys should be eliminated; the
; other should be replaced with a memmove.
define void @test2(i8* %P, i8* %Q) nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

; The intermediate alloca and one of the memcpys should be eliminated; the
; other should remain a memcpy.
define void @test2_constant(i8* %Q) nounwind {
; CHECK-LABEL: @test2_constant(
; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @C, i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  %P = getelementptr inbounds [0 x i8], [0 x i8]* @C, i64 0, i64 0
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

; The intermediate alloca and one of the memcpys should be eliminated; the
; other should remain a memcpy.
define void @test2_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test2_memcpy(
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

; Same as @test2_memcpy, but the remaining memcpy should remain non-inline even
; if the one eliminated was inline.
define void @test3_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test3_memcpy(
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

; Same as @test2_memcpy, but the remaining memcpy should remain inline even
; if the one eliminated was not inline.
define void @test4_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test4_memcpy(
; CHECK-NEXT: call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

; Same as @test2_memcpy, and the inline-ness should be preserved.
define void @test5_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test5_memcpy(
; CHECK-NEXT: call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT: ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}

@x = external global %0

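; The copy from @x into the local temporary should be forwarded, so @x is
; copied directly into the sret result.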
define void @test3(%0* noalias sret(%0) %agg.result) nounwind {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[X_0:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT: [[X_01:%.*]] = bitcast %0* [[X_0]] to i8*
; CHECK-NEXT: [[AGG_RESULT1:%.*]] = bitcast %0* [[AGG_RESULT:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[AGG_RESULT1]], i8* align 16 bitcast (%0* @x to i8*), i32 32, i1 false)
; CHECK-NEXT: [[AGG_RESULT2:%.*]] = bitcast %0* [[AGG_RESULT]] to i8*
; CHECK-NEXT: ret void
;
  %x.0 = alloca %0, align 16
  %x.01 = bitcast %0* %x.0 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %x.01, i8* align 16 bitcast (%0* @x to i8*), i32 32, i1 false)
  %agg.result2 = bitcast %0* %agg.result to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %agg.result2, i8* align 16 %x.01, i32 32, i1 false)
  ret void
}

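; The memcpy into the byval temporary should be eliminated and %P passed to
; @test4a directly.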
define void @test4(i8 *%P) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[P:%.*]])
; CHECK-NEXT: ret void
;
  %A = alloca %1
  %a = bitcast %1* %A to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 %P, i64 8, i1 false)
  call void @test4a(i8* align 1 byval(i8) %a)
  ret void
}

; Make sure we don't remove the memcpy if the source address space doesn't match the byval argument.
define void @test4_addrspace(i8 addrspace(1)* %P) {
; CHECK-LABEL: @test4_addrspace(
; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 [[A2]], i8 addrspace(1)* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: ret void
;
  %a1 = alloca %1, align 8
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 %a2, i8 addrspace(1)* align 4 %P, i64 8, i1 false)
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret void
}

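; The store to the temporary between the memcpy and the call means the byval
; argument cannot be replaced with %P.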
define void @test4_write_between(i8 *%P) {
; CHECK-LABEL: @test4_write_between(
; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT: store i8 0, i8* [[A2]], align 1
; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: ret void
;
  %a1 = alloca %1, align 8
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  store i8 0, i8* %a2
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret void
}

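; A load of the temporary between the memcpy and the call does not block the
; transform; with the MemorySSA-based analysis the byval argument is replaced
; with %P.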
define i8 @test4_read_between(i8 *%P) {
; NO_MSSA-LABEL: @test4_read_between(
; NO_MSSA-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; NO_MSSA-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; NO_MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; NO_MSSA-NEXT: [[X:%.*]] = load i8, i8* [[A2]], align 1
; NO_MSSA-NEXT: call void @test4a(i8* byval align 1 [[A2]])
; NO_MSSA-NEXT: ret i8 [[X]]
;
; MSSA-LABEL: @test4_read_between(
; MSSA-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; MSSA-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; MSSA-NEXT: [[X:%.*]] = load i8, i8* [[A2]], align 1
; MSSA-NEXT: call void @test4a(i8* byval align 1 [[P]])
; MSSA-NEXT: ret i8 [[X]]
;
  %a1 = alloca %1, align 8
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  %x = load i8, i8* %a2
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret i8 %x
}

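; The memcpy and the byval call are in different basic blocks; only the
; MemorySSA-based analysis forwards %P to the call.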
define void @test4_non_local(i8 *%P, i1 %c) {
; NO_MSSA-LABEL: @test4_non_local(
; NO_MSSA-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; NO_MSSA-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; NO_MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; NO_MSSA-NEXT: br i1 [[C:%.*]], label [[CALL:%.*]], label [[EXIT:%.*]]
; NO_MSSA: call:
; NO_MSSA-NEXT: call void @test4a(i8* byval align 1 [[A2]])
; NO_MSSA-NEXT: br label [[EXIT]]
; NO_MSSA: exit:
; NO_MSSA-NEXT: ret void
;
; MSSA-LABEL: @test4_non_local(
; MSSA-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; MSSA-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; MSSA-NEXT: br i1 [[C:%.*]], label [[CALL:%.*]], label [[EXIT:%.*]]
; MSSA: call:
; MSSA-NEXT: call void @test4a(i8* byval align 1 [[P]])
; MSSA-NEXT: br label [[EXIT]]
; MSSA: exit:
; MSSA-NEXT: ret void
;
  %a1 = alloca %1, align 8
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  br i1 %c, label %call, label %exit

call:
  call void @test4a(i8* align 1 byval(i8) %a2)
  br label %exit

exit:
  ret void
}

declare void @test4a(i8* align 1 byval(i8))

%struct.S = type { i128, [4 x i8] }

@sS = external global %struct.S, align 16

declare void @test5a(%struct.S* align 16 byval(%struct.S)) nounwind ssp

; rdar://8713376 - This memcpy can't be eliminated.
define i32 @test5(i32 %x) nounwind ssp {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[Y:%.*]] = alloca [[STRUCT_S:%.*]], align 16
; CHECK-NEXT: [[TMP:%.*]] = bitcast %struct.S* [[Y]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP]], i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
; CHECK-NEXT: [[A:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[Y]], i64 0, i32 1, i64 0
; CHECK-NEXT: store i8 4, i8* [[A]], align 1
; CHECK-NEXT: call void @test5a(%struct.S* byval([[STRUCT_S]]) align 16 [[Y]])
; CHECK-NEXT: ret i32 0
;
entry:
  %y = alloca %struct.S, align 16
  %tmp = bitcast %struct.S* %y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %tmp, i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
  %a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0
  store i8 4, i8* %a, align 1
  call void @test5a(%struct.S* align 16 byval(%struct.S) %y)
  ret i32 0
}

;; Noop memcpy should be zapped.
define void @test6(i8 *%P) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %P, i8* align 4 %P, i64 8, i1 false)
  ret void
}

; PR9794 - Should forward memcpy into byval argument even though the memcpy
; isn't itself 8-byte aligned.
%struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }

define i32 @test7(%struct.p* nocapture align 8 byval(%struct.p) %q) nounwind ssp {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32 @g(%struct.p* byval([[STRUCT_P:%.*]]) align 8 [[Q:%.*]]) #[[ATTR2]]
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
  %agg.tmp = alloca %struct.p, align 4
  %tmp = bitcast %struct.p* %agg.tmp to i8*
  %tmp1 = bitcast %struct.p* %q to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %tmp, i8* align 4 %tmp1, i64 48, i1 false)
  %call = call i32 @g(%struct.p* align 8 byval(%struct.p) %agg.tmp) nounwind
  ret i32 %call
}

declare i32 @g(%struct.p* align 8 byval(%struct.p))

; PR11142 - When looking for a memcpy-memcpy dependency, don't get stuck on
; instructions between the memcpys that only affect the destination pointer.
@test8.str = internal constant [7 x i8] c"ABCDEF\00"

define void @test8() {
; CHECK-LABEL: @test8(
; CHECK-NEXT: ret void
;
  %A = tail call i8* @malloc(i32 10)
  %B = getelementptr inbounds i8, i8* %A, i64 2
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %B, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @test8.str, i64 0, i64 0), i32 7, i1 false)
  %C = tail call i8* @malloc(i32 10)
  %D = getelementptr inbounds i8, i8* %C, i64 2
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %D, i8* %B, i32 7, i1 false)
  ret void
}

declare noalias i8* @malloc(i32) willreturn

%struct.big = type { [50 x i32] }

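; @f1 should be made to write its sret result directly into %b, removing the
; memcpy between the two allocas even though it goes through addrspacecasts.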
define void @test9_addrspacecast() nounwind ssp uwtable {
; CHECK-LABEL: @test9_addrspacecast(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
; CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
; CHECK-NEXT: call void @f1(%struct.big* sret([[STRUCT_BIG]]) [[B]])
; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast %struct.big* [[B]] to i8 addrspace(1)*
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast %struct.big* [[TMP]] to i8 addrspace(1)*
; CHECK-NEXT: call void @f2(%struct.big* [[B]])
; CHECK-NEXT: ret void
;
entry:
  %b = alloca %struct.big, align 4
  %tmp = alloca %struct.big, align 4
  call void @f1(%struct.big* sret(%struct.big) %tmp)
  %0 = addrspacecast %struct.big* %b to i8 addrspace(1)*
  %1 = addrspacecast %struct.big* %tmp to i8 addrspace(1)*
  call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 4 %0, i8 addrspace(1)* align 4 %1, i64 200, i1 false)
  call void @f2(%struct.big* %b)
  ret void
}

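; Same as above, but with plain bitcasts instead of addrspacecasts.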
define void @test9() nounwind ssp uwtable {
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
; CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
; CHECK-NEXT: call void @f1(%struct.big* sret([[STRUCT_BIG]]) [[B]])
; CHECK-NEXT: [[TMP0:%.*]] = bitcast %struct.big* [[B]] to i8*
; CHECK-NEXT: [[TMP1:%.*]] = bitcast %struct.big* [[TMP]] to i8*
; CHECK-NEXT: call void @f2(%struct.big* [[B]])
; CHECK-NEXT: ret void
;
entry:
  %b = alloca %struct.big, align 4
  %tmp = alloca %struct.big, align 4
  call void @f1(%struct.big* sret(%struct.big) %tmp)
  %0 = bitcast %struct.big* %b to i8*
  %1 = bitcast %struct.big* %tmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 200, i1 false)
  call void @f2(%struct.big* %b)
  ret void
}

; Test10 triggered an assertion when the compiler tried to get the size of the
; opaque type of *%x, where %x is the formal argument with the 'sret' attribute.
%opaque = type opaque

declare void @foo(i32* noalias nocapture)

define void @test10(%opaque* noalias nocapture sret(%opaque) %x, i32 %y) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 [[Y:%.*]], i32* [[A]], align 4
; CHECK-NEXT: call void @foo(i32* noalias nocapture [[A]])
; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: [[D:%.*]] = bitcast %opaque* [[X:%.*]] to i32*
; CHECK-NEXT: store i32 [[C]], i32* [[D]], align 4
; CHECK-NEXT: ret void
;
  %a = alloca i32, align 4
  store i32 %y, i32* %a
  call void @foo(i32* noalias nocapture %a)
  %c = load i32, i32* %a
  %d = bitcast %opaque* %x to i32*
  store i32 %c, i32* %d
  ret void
}

; Don't create new addrspacecasts when we don't know they're safe for the target.
define void @test11([20 x i32] addrspace(1)* nocapture dereferenceable(80) %P) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[B:%.*]] = bitcast [20 x i32] addrspace(1)* [[P:%.*]] to i8 addrspace(1)*
; CHECK-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 [[B]], i8 0, i64 80, i1 false)
; CHECK-NEXT: ret void
;
  %A = alloca [20 x i32], align 4
  %a = bitcast [20 x i32]* %A to i8*
  %b = bitcast [20 x i32] addrspace(1)* %P to i8 addrspace(1)*
  call void @llvm.memset.p0i8.i64(i8* align 4 %a, i8 0, i64 80, i1 false)
  call void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* align 4 %b, i8* align 4 %a, i64 80, i1 false)
  ret void
}

declare void @f1(%struct.big* nocapture sret(%struct.big))
declare void @f2(%struct.big*)