; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR
; RUN: opt < %s -S -nvptx-lower-aggr-copies -use-wide-memcpy-loop-lowering=true | FileCheck %s --check-prefix WIR

; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
; llvm.mem* intrinsics get lowered to loops.
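; The PTX prefix checks the llc output, the IR prefix checks the IR produced
; by the lowering pass with its default loop expansion, and the WIR prefix
; checks the IR produced with -use-wide-memcpy-loop-lowering enabled.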

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "nvptx64-unknown-unknown"

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1

define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
  ret i8* %dst
}
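
; Check that a memcpy whose size is only known at run time is lowered to a
; simple byte-wise load/store loop.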
; IR-LABEL: @memcpy_caller
; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: [[LOADPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64
; IR-NEXT: [[VAL:%[0-9]+]] = load i8, i8* [[LOADPTR]]
; IR-NEXT: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]

; PTX-LABEL: .visible .func (.param .b64 func_retval0) memcpy_caller
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: ld.u8 %rs[[REG:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX: @%p[[PRED]] bra LBB[[LABEL]]

; WIR-LABEL: @memcpy_caller
; WIR: [[LoopCount:%[0-9]+]] = udiv i64 %n, 1
; WIR: [[ResidualSize:%[0-9]+]] = urem i64 %n, 1
; WIR: [[Cond:%[0-9]+]] = icmp ne i64 [[LoopCount]], 0
; WIR: br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; WIR: loop-memcpy-expansion:
; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; WIR: [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; WIR: store i8 [[Load]], i8* [[DstGep]]
; WIR: [[IndexInc]] = add i64 %loop-index, 1
; WIR: [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], [[LoopCount]]
; WIR: br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
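
; With 1-byte alignment on the call, the wide lowering still uses i8 accesses,
; so the udiv/urem by 1 above correspond to a one-byte copy width.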

define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 true)
  ret i8* %dst
}
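
; The volatile flag on the intrinsic must be propagated to the loads and
; stores of the lowered loop.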
; IR-LABEL: @memcpy_volatile_caller
; IR: load volatile
; IR: store volatile

; PTX-LABEL: .visible .func (.param .b64 func_retval0) memcpy_volatile_caller
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: ld.volatile.u8 %rs[[REG:[0-9]+]]
; PTX: st.volatile.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX: @%p[[PRED]] bra LBB[[LABEL]]

; WIR-LABEL: @memcpy_volatile_caller
; WIR: [[LoopCount:%[0-9]+]] = udiv i64 %n, 1
; WIR: [[ResidualSize:%[0-9]+]] = urem i64 %n, 1
; WIR: [[Cond:%[0-9]+]] = icmp ne i64 [[LoopCount]], 0
; WIR: br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; WIR: loop-memcpy-expansion:
; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; WIR: [[Load:%[0-9]+]] = load volatile i8, i8* [[SrcGep]]
; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; WIR: store volatile i8 [[Load]], i8* [[DstGep]]
; WIR: [[IndexInc]] = add i64 %loop-index, 1
; WIR: [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], [[LoopCount]]
; WIR: br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
entry:
  %0 = bitcast i32* %dst to i8*
  %1 = bitcast i32* %src to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i32 1, i1 false)
  ret i8* %0
}

; Check that casts in calls to memcpy are handled properly
; IR-LABEL: @memcpy_casting_caller
; IR: [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
; IR: [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
; IR: getelementptr inbounds i8, i8* [[SRCCAST]]
; IR: getelementptr inbounds i8, i8* [[DSTCAST]]

; WIR-LABEL: @memcpy_casting_caller
; WIR: [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
; WIR: [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
; WIR: getelementptr inbounds i8, i8* [[SRCCAST]]
; WIR: getelementptr inbounds i8, i8* [[DSTCAST]]

define i8* @memcpy_known_size(i8* %dst, i8* %src) {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 144, i32 1, i1 false)
  ret i8* %dst
}

; Check that calls with compile-time constant size are handled correctly
; WIR-LABEL: @memcpy_known_size
; WIR: br label %load-store-loop
; WIR: load-store-loop:
; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %load-store-loop ]
; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; WIR: [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; WIR: store i8 [[Load]], i8* [[DstGep]]
; WIR: [[IndexInc]] = add i64 %loop-index, 1
; WIR: [[Cond:%[0-9]+]] = icmp ult i64 [[IndexInc]], 144
; WIR: br i1 [[Cond]], label %load-store-loop, label %memcpy-split

define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 false)
  ret i8* %dst
}
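
; Check that a memset with a run-time length is lowered to a loop storing the
; truncated byte value.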
; IR-LABEL: @memset_caller
; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]

; PTX-LABEL: .visible .func (.param .b64 func_retval0) memset_caller(
; PTX: ld.param.u32 %r[[C:[0-9]+]]
; PTX: cvt.u16.u32 %rs[[REG:[0-9]+]], %r[[C]];
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX: @%p[[PRED]] bra LBB[[LABEL]]

define i8* @volatile_memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 true)
  ret i8* %dst
}
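
; A volatile memset must be lowered to volatile stores.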
; IR-LABEL: @volatile_memset_caller
; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
; IR: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT: store volatile i8 [[VAL]], i8* [[STOREPTR]]

define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
  ret i8* %dst
}
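
; memmove must cope with potentially overlapping buffers, so the lowering
; compares src and dst and copies either backwards or forwards.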
; IR-LABEL: @memmove_caller
; IR: icmp ult i8* %src, %dst
; IR: [[PHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR-NEXT: %index_ptr = sub i64 [[PHIVAL]], 1
; IR: [[FWDPHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR: {{%[0-9a-zA-Z_]+}} = add i64 [[FWDPHIVAL]], 1

; PTX-LABEL: .visible .func (.param .b64 func_retval0) memmove_caller(
; PTX: ld.param.u64 %rd[[N:[0-9]+]]
; PTX-DAG: setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
; PTX-DAG: setp.ge.u64 %p[[SRC_GT_THAN_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
; PTX-NEXT: @%p[[SRC_GT_THAN_DST]] bra LBB[[FORWARD_BB:[0-9_]+]]
; -- this is the backwards copying BB
; PTX: @%p[[NEQ0]] bra LBB[[EXIT:[0-9_]+]]
; PTX: add.s64 %rd{{[0-9]}}, %rd{{[0-9]}}, -1
; PTX: ld.u8 %rs[[ELEMENT:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT]]
; -- this is the forwards copying BB
; PTX: LBB[[FORWARD_BB]]:
; PTX: @%p[[NEQ0]] bra LBB[[EXIT]]
; PTX: ld.u8 %rs[[ELEMENT2:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT2]]
; PTX: add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, 1
; -- this is the exit BB
; PTX: LBB[[EXIT]]:
; PTX-NEXT: st.param.b64 [func_retval0
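
; Attribute groups referenced by the functions and intrinsic declarations
; above (assumed minimal definitions so the module parses).
attributes #0 = { norecurse nounwind }
attributes #1 = { argmemonly nounwind }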