; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=x86_64-pc-linux-gnu -passes=pre-isel-intrinsic-lowering -S -o - %s | FileCheck %s

; Constant length memcpy.inline should be left unmodified.
define void @memcpy_32(ptr %dst, ptr %src) nounwind {
; CHECK-LABEL: define void @memcpy_32(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 32, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 32, i1 true)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 32, i1 0)
  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 32, i1 1)
  ret void
}

; Variable-length memcpy.inline must be lowered to a byte-copy loop (the
; second, volatile call expands to a loop with volatile load/store).
define void @memcpy_x(ptr %dst, ptr %src, i64 %x) nounwind {
; CHECK-LABEL: define void @memcpy_x(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[X]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label %[[LOOP_MEMCPY_EXPANSION:.*]], label %[[POST_LOOP_MEMCPY_EXPANSION:.*]]
; CHECK:       [[LOOP_MEMCPY_EXPANSION]]:
; CHECK-NEXT:    [[LOOP_INDEX:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP5:%.*]], %[[LOOP_MEMCPY_EXPANSION]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[LOOP_INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[LOOP_INDEX]]
; CHECK-NEXT:    store i8 [[TMP3]], ptr [[TMP4]], align 1
; CHECK-NEXT:    [[TMP5]] = add i64 [[LOOP_INDEX]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP5]], [[X]]
; CHECK-NEXT:    br i1 [[TMP6]], label %[[LOOP_MEMCPY_EXPANSION]], label %[[POST_LOOP_MEMCPY_EXPANSION]]
; CHECK:       [[POST_LOOP_MEMCPY_EXPANSION]]:
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i64 [[X]], 0
; CHECK-NEXT:    br i1 [[TMP7]], label %[[LOOP_MEMCPY_EXPANSION2:.*]], label %[[POST_LOOP_MEMCPY_EXPANSION1:.*]]
; CHECK:       [[LOOP_MEMCPY_EXPANSION2]]:
; CHECK-NEXT:    [[LOOP_INDEX3:%.*]] = phi i64 [ 0, %[[POST_LOOP_MEMCPY_EXPANSION]] ], [ [[TMP11:%.*]], %[[LOOP_MEMCPY_EXPANSION2]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[LOOP_INDEX3]]
; CHECK-NEXT:    [[TMP9:%.*]] = load volatile i8, ptr [[TMP8]], align 1
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[LOOP_INDEX3]]
; CHECK-NEXT:    store volatile i8 [[TMP9]], ptr [[TMP10]], align 1
; CHECK-NEXT:    [[TMP11]] = add i64 [[LOOP_INDEX3]], 1
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i64 [[TMP11]], [[X]]
; CHECK-NEXT:    br i1 [[TMP12]], label %[[LOOP_MEMCPY_EXPANSION2]], label %[[POST_LOOP_MEMCPY_EXPANSION1]]
; CHECK:       [[POST_LOOP_MEMCPY_EXPANSION1]]:
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 %x, i1 0)
  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 %x, i1 1)
  ret void
}