; RUN: opt %loadPolly -polly-scops -polly-invariant-load-hoisting \
; RUN: -analyze < %s | \
; RUN: FileCheck -check-prefix=SCOP %s

; RUN: opt %loadPolly -polly-codegen-ppcg -polly-invariant-load-hoisting \
; RUN: -S < %s | \
; RUN: FileCheck -check-prefix=HOST-IR %s

; RUN: opt %loadPolly -polly-codegen-ppcg -polly-invariant-load-hoisting \
; RUN: -disable-output -polly-acc-dump-kernel-ir < %s | \
; RUN: FileCheck -check-prefix=KERNEL-IR %s

; REQUIRES: pollyacc

; Check that we offload invariant loads of scalars correctly.

; Check that invariant loads are present.
; SCOP: Function: checkPrivatization
; SCOP-NEXT: Region: %entry.split---%for.end
; SCOP-NEXT: Max Loop Depth: 1
; SCOP-NEXT: Invariant Accesses: {
; SCOP-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; SCOP-NEXT: [tmp, tmp2] -> { Stmt_entry_split[] -> MemRef_begin[0] };
; SCOP-NEXT: Execution Context: [tmp, tmp2] -> { : }
; SCOP-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; SCOP-NEXT: [tmp, tmp2] -> { Stmt_for_body[i0] -> MemRef_end[0] };
; SCOP-NEXT: Execution Context: [tmp, tmp2] -> { : }
; SCOP-NEXT: }
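;
; Conceptually, invariant load hoisting moves the loads of *begin and *end
; out of the SCoP, so the region only depends on two read-only scalars. A
; rough C-level sketch of the hoisted form (variable names are illustrative,
; not Polly's output):
;
;   int begin_val = *begin; /* hoisted: executed once, before the SCoP */
;   int end_val = *end;     /* hoisted: the bound becomes a scalar param */
;   for (int i = begin_val; i < end_val; i++)
;     A[i] = 10;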

; Check that we do not actually allocate arrays for %begin and %end, since
; they are invariant-load-hoisted.
; HOST-IR: %p_dev_array_MemRef_A = call i8* @polly_allocateMemoryForDevice
; HOST-IR-NOT: call i8* @polly_allocateMemoryForDevice
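;
; In host-code terms, only A needs a device buffer; the hoisted scalars are
; passed by value through the kernel argument block. A hedged C-level sketch
; using Polly's GPURuntime entry points (argument lists elided, so the calls
; here are illustrative rather than exact signatures):
;
;   dev_A = polly_allocateMemoryForDevice(...); /* the only allocation */
;   polly_copyFromHostToDevice(A, dev_A, ...);  /* only A is copied */
;   polly_launchKernel(kernel, ...);            /* tmp, tmp2 and the two
;                                                  hoisted loads travel in
;                                                  the argument block */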

; Check that we send the invariant loaded scalars as parameters to the
; kernel function.
; KERNEL-IR: define ptx_kernel void @FUNC_checkPrivatization_SCOP_0_KERNEL_0
; KERNEL-IR-SAME: (i8 addrspace(1)* %MemRef_A, i32 %tmp,
; KERNEL-IR-SAME: i32 %tmp2, i32 %polly.access.begin.load,
; KERNEL-IR-SAME: i32 %polly.access.end.load)
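;
; At the CUDA level this corresponds to a kernel that takes the bounds by
; value instead of dereferencing host pointers. A minimal illustrative
; sketch (the thread mapping is invented for the example, not Polly's
; actual schedule):
;
;   __global__ void kernel(int *A, int begin_val, int end_val) {
;     int i = begin_val + blockIdx.x * blockDim.x + threadIdx.x;
;     if (i < end_val)
;       A[i] = 10;
;   }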

; void checkScalarPointerOffload(int A[], int *begin, int *end) {
;   for (int i = *begin; i < *end; i++) {
;     A[i] = 10;
;   }
; }

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.12.0"

define void @checkPrivatization(i32* %A, i32* %begin, i32* %end) {
entry:
  br label %entry.split

entry.split:                                      ; preds = %entry
  %tmp = load i32, i32* %begin, align 4
  %tmp21 = load i32, i32* %end, align 4
  %cmp3 = icmp slt i32 %tmp, %tmp21
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry.split
  %tmp1 = sext i32 %tmp to i64
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %indvars.iv4 = phi i64 [ %tmp1, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv4
  store i32 10, i32* %arrayidx, align 4
  %indvars.iv.next = add i64 %indvars.iv4, 1
  %tmp2 = load i32, i32* %end, align 4
  %tmp3 = sext i32 %tmp2 to i64
  %cmp = icmp slt i64 %indvars.iv.next, %tmp3
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry.split
  ret void
}