; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; MemCpy optimizations should take place even in the presence of invariant.start
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"

target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }

declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)

declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly

; The intermediate alloca and one of the memcpy calls should be eliminated; the
; other should be transformed to a memmove.
define void @test1(ptr %P, ptr %Q) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[MEMTMP]], ptr align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 32, ptr [[P]])
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
  %i = call ptr @llvm.invariant.start.p0(i64 32, ptr %P)
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
  ret void
}

; The invariant.start intrinsic does not inhibit transforming the memcpy to a
; memset.
define void @test2(ptr %dst1, ptr %dst2, i8 %c) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[DST1:%.*]], i8 [[C:%.*]], i64 128, i1 false)
; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 32, ptr [[DST1]])
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[DST2:%.*]], i8 [[C]], i64 128, i1 false)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.p0.i64(ptr %dst1, i8 %c, i64 128, i1 false)
  %i = call ptr @llvm.invariant.start.p0(i64 32, ptr %dst1)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst2, ptr align 8 %dst1, i64 128, i1 false)
  ret void
}