; RUN: opt %loadPolly -polly-codegen-ppcg -polly-acc-dump-code \
; RUN: -disable-output < %s | \
; RUN: FileCheck -check-prefix=CODE %s

; RUN: opt %loadPolly -polly-codegen-ppcg \
; RUN: -S < %s | \
; RUN: FileCheck -check-prefix=IR %s

; RUN: opt %loadPolly -polly-codegen-ppcg \
; RUN: -disable-output -polly-acc-dump-kernel-ir < %s | \
; RUN: FileCheck -check-prefix=KERNEL %s

; REQUIRES: pollyacc,nvptx
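
; This test checks code generation for kernels that take scalar parameters
; of various types (float, double, i1, i3, i8, i32, i60, i64): the array %A
; is copied to and from the device, and the scalar %b is passed to the
; kernel by value.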
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; KERNEL: define ptx_kernel void @kernel_0(i8* %MemRef_A, float %MemRef_b)

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(float), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(float), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
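
; All 1024 loop iterations are mapped onto 32 thread blocks of 32 threads
; each, so every thread executes exactly one statement instance,
; Stmt_bb2(32 * b0 + t0).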
; void foo(float A[], float b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @float(float* %A, float %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds float, float* %A, i64 %i.0
  %tmp3 = load float, float* %tmp, align 4
  %tmp4 = fadd float %tmp3, %b
  store float %tmp4, float* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; KERNEL: define ptx_kernel void @kernel_0(i8* %MemRef_A, double %MemRef_b)
; KERNEL-NEXT: entry:
; KERNEL-NEXT: %b.s2a = alloca double
; KERNEL-NEXT: store double %MemRef_b, double* %b.s2a
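
; Rather than using %MemRef_b directly, the kernel first stores the scalar
; argument into a stack slot (%b.s2a); later uses of the scalar are
; rewritten to load from this slot.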

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(double), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(double), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(double A[], double b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @double(double* %A, double %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds double, double* %A, i64 %i.0
  %tmp3 = load double, double* %tmp, align 4
  %tmp4 = fadd double %tmp3, %b
  store double %tmp4, double* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i1), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i1), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i1 A[], i1 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i1(i1* %A, i1 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i1, i1* %A, i64 %i.0
  %tmp3 = load i1, i1* %tmp, align 4
  %tmp4 = add i1 %tmp3, %b
  store i1 %tmp4, i1* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i3), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i3), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i3 A[], i3 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i3(i3* %A, i3 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i3, i3* %A, i64 %i.0
  %tmp3 = load i3, i3* %tmp, align 4
  %tmp4 = add i3 %tmp3, %b
  store i3 %tmp4, i3* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i8), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i8), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i8 A[], i8 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i8(i8* %A, i8 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i8, i8* %A, i64 %i.0
  %tmp3 = load i8, i8* %tmp, align 4
  %tmp4 = add i8 %tmp3, %b
  store i8 %tmp4, i8* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}
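
; The IR checks below verify how kernel arguments are marshalled on the
; host side: the device pointer for %A and the scalar %b (passed by value)
; are each stored into a slot of the polly_launch_0_params array that is
; then handed to the kernel launch.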

; IR: [[REGA:%.+]] = call i8* @polly_getDevicePtr(i8* %p_dev_array_MemRef_A)
; IR-NEXT: [[REGB:%.+]] = getelementptr [2 x i8*], [2 x i8*]* %polly_launch_0_params, i64 0, i64 0
; IR-NEXT: store i8* [[REGA]], i8** %polly_launch_0_param_0
; IR-NEXT: [[REGC:%.+]] = bitcast i8** %polly_launch_0_param_0 to i8*
; IR-NEXT: store i8* [[REGC]], i8** [[REGB]]
; IR-NEXT: store i8 %b, i8* %polly_launch_0_param_1
; IR-NEXT: [[REGD:%.+]] = getelementptr [2 x i8*], [2 x i8*]* %polly_launch_0_params, i64 0, i64 1
; IR-NEXT: store i8* %polly_launch_0_param_1, i8** [[REGD]]

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i32), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i32), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i32 A[], i32 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i32(i32* %A, i32 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i32, i32* %A, i64 %i.0
  %tmp3 = load i32, i32* %tmp, align 4
  %tmp4 = add i32 %tmp3, %b
  store i32 %tmp4, i32* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i60), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i60), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i60 A[], i60 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i60(i60* %A, i60 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i60, i60* %A, i64 %i.0
  %tmp3 = load i60, i60* %tmp, align 4
  %tmp4 = add i60 %tmp3, %b
  store i60 %tmp4, i60* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}

; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i64), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }

; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i64), cudaMemcpyDeviceToHost));
; CODE-NEXT: }

; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i64 A[], i64 b) {
;   for (long i = 0; i < 1024; i++)
;     A[i] += b;
; }

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @i64(i64* %A, i64 %b) {
bb:
  br label %bb1

bb1: ; preds = %bb5, %bb
  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
  %exitcond = icmp ne i64 %i.0, 1024
  br i1 %exitcond, label %bb2, label %bb7

bb2: ; preds = %bb1
  %tmp = getelementptr inbounds i64, i64* %A, i64 %i.0
  %tmp3 = load i64, i64* %tmp, align 4
  %tmp4 = add i64 %tmp3, %b
  store i64 %tmp4, i64* %tmp, align 4
  br label %bb5

bb5: ; preds = %bb2
  %tmp6 = add nuw nsw i64 %i.0, 1
  br label %bb1

bb7: ; preds = %bb1
  ret void
}