clang/test/OpenMP/irbuilder_unroll_partial_heuristic_for_collapse.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics

// REQUIRES: x86-registered-target

#ifndef HEADER
#define HEADER

double sind(double);
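
// sind() is declared but never defined, so the call in the loop body stays
// opaque to the compiler and must survive verbatim into the generated IR
// (see the call to @sind in the check lines below).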

void unroll_partial_heuristic_for(int m, float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for collapse(2)
  for (int i = 0; i < m; i++) {
#pragma omp unroll partial
    for (int j = 0; j < 8; j++) {
      a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
    }
  }
}

#endif // HEADER
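
// A hedged sketch (not part of the test) of the loop nest the lowering below
// corresponds to: the check lines show the heuristic picked an unroll factor
// of 2, so the j loop becomes 4 logical iterations of a stride-2 "unrolled"
// loop plus an inner remainder-guarded loop, and collapse(2) then fuses the
// i loop with the 4-iteration unrolled loop into one logical iteration space
// of m * 4. The name `uj` is an illustrative stand-in for the generated
// .unrolled.iv.j / .unroll_inner.iv.j values.
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < m; i++)
//     for (int uj = 0; uj < 8; uj += 2)              // 4 logical iterations
//       for (int j = uj; j < uj + 2 && j < 8; j++)   // factor-2 unroll body
//         a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;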

// CHECK-LABEL: define {{[^@]+}}@unroll_partial_heuristic_for
// CHECK-SAME: (i32 noundef [[M:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef [[E:%.*]], float noundef [[OFFSET:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[OFFSET_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLLED_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I6:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLLED_IV_J7:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLL_INNER_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
// CHECK-NEXT: store float [[OFFSET]], ptr [[OFFSET_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[M_ADDR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: store i32 0, ptr [[J]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], 4
// CHECK-NEXT: [[SUB3:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 8
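// The value stored above is the collapsed trip count ((m - 0) / 1) * 4 - 1:
// m outer iterations times the 4 logical iterations left in j after the
// factor-2 partial unroll, minus one to form an inclusive upper bound.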
// CHECK-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTUNROLLED_IV_J]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
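// Only the outer bound is dynamic (the inner loop runs a constant 8 times),
// so the zero-trip-count guard reduces to checking 0 < m.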
// CHECK: omp.precond.then:
// CHECK-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
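// __kmpc_for_static_init_8 divides the collapsed 64-bit iteration space
// among the threads; schedule kind 34 is the unchunked static schedule
// (kmp_sch_static in the OpenMP runtime's sched_type enum).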
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i64 [ [[TMP6]], [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP11]], 4
// CHECK-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL13]]
// CHECK-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD]] to i32
// CHECK-NEXT: store i32 [[CONV14]], ptr [[I6]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP13]], 4
// CHECK-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 4
// CHECK-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP12]], [[MUL16]]
// CHECK-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 2
// CHECK-NEXT: [[ADD19:%.*]] = add nsw i64 0, [[MUL18]]
// CHECK-NEXT: [[CONV20:%.*]] = trunc i64 [[ADD19]] to i32
// CHECK-NEXT: store i32 [[CONV20]], ptr [[DOTUNROLLED_IV_J7]], align 4
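// De-linearization of the collapsed induction variable: i = iv / 4, and the
// unrolled j value is (iv % 4) * 2, matching the stride-2 outer j loop in
// the sketch above.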
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: store i32 [[TMP14]], ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
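// for.cond and land.rhs implement the retained inner loop: it runs while
// j < unrolled_j + 2 (the factor-2 unroll) and j < 8 (the original bound).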
// CHECK: for.cond:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP16]], 2
// CHECK-NEXT: [[CMP22:%.*]] = icmp slt i32 [[TMP15]], [[ADD21]]
// CHECK-NEXT: br i1 [[CMP22]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK: land.rhs:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[CMP24:%.*]] = icmp slt i32 [[TMP17]], 8
// CHECK-NEXT: br label [[LAND_END]]
// CHECK: land.end:
// CHECK-NEXT: [[TMP18:%.*]] = phi i1 [ false, [[FOR_COND]] ], [ [[CMP24]], [[LAND_RHS]] ]
// CHECK-NEXT: br i1 [[TMP18]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
// CHECK-NEXT: store i32 [[ADD27]], ptr [[J]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: [[CONV28:%.*]] = fpext float [[TMP22]] to double
// CHECK-NEXT: [[CALL:%.*]] = call double @sind(double noundef [[CONV28]])
// CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT: [[IDXPROM29:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM29]]
// CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX30]], align 4
// CHECK-NEXT: [[CONV31:%.*]] = fpext float [[TMP25]] to double
// CHECK-NEXT: [[MUL32:%.*]] = fmul double [[CALL]], [[CONV31]]
// CHECK-NEXT: [[TMP26:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
// CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[IDXPROM33]]
// CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX34]], align 4
// CHECK-NEXT: [[CONV35:%.*]] = fpext float [[TMP28]] to double
// CHECK-NEXT: [[MUL36:%.*]] = fmul double [[MUL32]], [[CONV35]]
// CHECK-NEXT: [[TMP29:%.*]] = load ptr, ptr [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT: [[IDXPROM37:%.*]] = sext i32 [[TMP30]] to i64
// CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i64 [[IDXPROM37]]
// CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX38]], align 4
// CHECK-NEXT: [[CONV39:%.*]] = fpext float [[TMP31]] to double
// CHECK-NEXT: [[MUL40:%.*]] = fmul double [[MUL36]], [[CONV39]]
// CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[OFFSET_ADDR]], align 4
// CHECK-NEXT: [[CONV41:%.*]] = fpext float [[TMP32]] to double
// CHECK-NEXT: [[ADD42:%.*]] = fadd double [[MUL40]], [[CONV41]]
// CHECK-NEXT: [[TMP33:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT: [[IDXPROM43:%.*]] = sext i32 [[TMP34]] to i64
// CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[TMP33]], i64 [[IDXPROM43]]
// CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX44]], align 4
// CHECK-NEXT: [[CONV45:%.*]] = fpext float [[TMP35]] to double
// CHECK-NEXT: [[ADD46:%.*]] = fadd double [[CONV45]], [[ADD42]]
// CHECK-NEXT: [[CONV47:%.*]] = fptrunc double [[ADD46]] to float
// CHECK-NEXT: store float [[CONV47]], ptr [[ARRAYIDX44]], align 4
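// The straight-line body above is the lowering of
// a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset, with every operand
// promoted to double around the call to sind() and the result truncated
// back to float for the store into a[i].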
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP36]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
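// The inner latch branch carries !llvm.loop metadata (LOOP3); for this
// partial-unroll lowering it presumably directs LLVM's loop unroller, but
// the metadata body itself is not part of this excerpt.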
// CHECK: for.end:
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[ADD48:%.*]] = add nsw i64 [[TMP37]], 1
// CHECK-NEXT: store i64 [[ADD48]], ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM49:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM49]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB6:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM50]])
// CHECK-NEXT: ret void
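// The worksharing region is torn down with __kmpc_for_static_fini, and the
// implicit barrier at the end of the #pragma omp for (no nowait clause) is
// emitted as the final __kmpc_barrier call.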