// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name tmp2 --version 2
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR

// Check same results after serialization round-trip
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH

// expected-no-diagnostics
11 #ifndef HEADER
12 #define HEADER
14 void foo(int t) {
16 int i, j, z;
17 #pragma omp loop collapse(2) reduction(+:z) lastprivate(j) bind(thread)
18 for (int i = 0; i<t; ++i)
19 for (j = 0; j<t; ++j)
20 z += i+j;
22 #endif
// IR-LABEL: define dso_local void @_Z3fooi
// IR-SAME: (i32 noundef [[T:%.*]]) #[[ATTR0:[0-9]+]] {
// IR-NEXT: entry:
// IR-NEXT: [[T_ADDR:%.*]] = alloca i32, align 4
// IR-NEXT: [[I:%.*]] = alloca i32, align 4
// IR-NEXT: [[J:%.*]] = alloca i32, align 4
// IR-NEXT: [[Z:%.*]] = alloca i32, align 4
// IR-NEXT: [[TMP:%.*]] = alloca i32, align 4
// IR-NEXT: [[TMP2TMP1:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
// IR-NEXT: [[I8:%.*]] = alloca i32, align 4
// IR-NEXT: [[J9:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// IR-NEXT: [[I11:%.*]] = alloca i32, align 4
// IR-NEXT: [[J12:%.*]] = alloca i32, align 4
// IR-NEXT: [[Z13:%.*]] = alloca i32, align 4
// IR-NEXT: store i32 [[T]], ptr [[T_ADDR]], align 4
// IR-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_ADDR]], align 4
// IR-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_ADDR]], align 4
// IR-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// IR-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// IR-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// IR-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP3]], 0
// IR-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
// IR-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
// IR-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
// IR-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
// IR-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// IR-NEXT: store i32 0, ptr [[I8]], align 4
// IR-NEXT: store i32 0, ptr [[J9]], align 4
// IR-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// IR-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]]
// IR: land.lhs.true:
// IR-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP5]]
// IR-NEXT: br i1 [[CMP10]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]]
// IR: simd.if.then:
// IR-NEXT: store i64 0, ptr [[DOTOMP_IV]], align 8
// IR-NEXT: store i32 0, ptr [[Z13]], align 4
// IR-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// IR: omp.inner.for.cond:
// IR-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]]
// IR-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP7]], 1
// IR-NEXT: [[CMP14:%.*]] = icmp slt i64 [[TMP6]], [[ADD]]
// IR-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// IR: omp.inner.for.body:
// IR-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP9]], 0
// IR-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
// IR-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
// IR-NEXT: [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
// IR-NEXT: [[DIV19:%.*]] = sdiv i64 [[TMP8]], [[CONV18]]
// IR-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
// IR-NEXT: [[ADD21:%.*]] = add nsw i64 0, [[MUL20]]
// IR-NEXT: [[CONV22:%.*]] = trunc i64 [[ADD21]] to i32
// IR-NEXT: store i32 [[CONV22]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP12]], 0
// IR-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
// IR-NEXT: [[MUL25:%.*]] = mul nsw i32 1, [[DIV24]]
// IR-NEXT: [[CONV26:%.*]] = sext i32 [[MUL25]] to i64
// IR-NEXT: [[DIV27:%.*]] = sdiv i64 [[TMP11]], [[CONV26]]
// IR-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP13]], 0
// IR-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1
// IR-NEXT: [[MUL30:%.*]] = mul nsw i32 1, [[DIV29]]
// IR-NEXT: [[CONV31:%.*]] = sext i32 [[MUL30]] to i64
// IR-NEXT: [[MUL32:%.*]] = mul nsw i64 [[DIV27]], [[CONV31]]
// IR-NEXT: [[SUB33:%.*]] = sub nsw i64 [[TMP10]], [[MUL32]]
// IR-NEXT: [[MUL34:%.*]] = mul nsw i64 [[SUB33]], 1
// IR-NEXT: [[ADD35:%.*]] = add nsw i64 0, [[MUL34]]
// IR-NEXT: [[CONV36:%.*]] = trunc i64 [[ADD35]] to i32
// IR-NEXT: store i32 [[CONV36]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP14:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[TMP15:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[ADD37:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// IR-NEXT: [[TMP16:%.*]] = load i32, ptr [[Z13]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP16]], [[ADD37]]
// IR-NEXT: store i32 [[ADD38]], ptr [[Z13]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// IR: omp.body.continue:
// IR-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// IR: omp.inner.for.inc:
// IR-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP17]], 1
// IR-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// IR: omp.inner.for.end:
// IR-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP18]], 0
// IR-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// IR-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1
// IR-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
// IR-NEXT: store i32 [[ADD43]], ptr [[I11]], align 4
// IR-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP19]], 0
// IR-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// IR-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
// IR-NEXT: [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
// IR-NEXT: store i32 [[ADD47]], ptr [[J]], align 4
// IR-NEXT: [[TMP20:%.*]] = load i32, ptr [[Z]], align 4
// IR-NEXT: [[TMP21:%.*]] = load i32, ptr [[Z13]], align 4
// IR-NEXT: [[ADD48:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// IR-NEXT: store i32 [[ADD48]], ptr [[Z]], align 4
// IR-NEXT: br label [[SIMD_IF_END]]
// IR: simd.if.end:
// IR-NEXT: ret void
// IR-PCH-LABEL: define dso_local void @_Z3fooi
// IR-PCH-SAME: (i32 noundef [[T:%.*]]) #[[ATTR0:[0-9]+]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[T_ADDR:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[I:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[J:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[Z:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[TMP:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[TMP2TMP1:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
// IR-PCH-NEXT: [[I8:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[J9:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// IR-PCH-NEXT: [[I11:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[J12:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[Z13:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: store i32 [[T]], ptr [[T_ADDR]], align 4
// IR-PCH-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_ADDR]], align 4
// IR-PCH-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-PCH-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_ADDR]], align 4
// IR-PCH-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-PCH-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-PCH-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// IR-PCH-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// IR-PCH-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// IR-PCH-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-PCH-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP3]], 0
// IR-PCH-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
// IR-PCH-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
// IR-PCH-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
// IR-PCH-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
// IR-PCH-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// IR-PCH-NEXT: store i32 0, ptr [[I8]], align 4
// IR-PCH-NEXT: store i32 0, ptr [[J9]], align 4
// IR-PCH-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-PCH-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// IR-PCH-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]]
// IR-PCH: land.lhs.true:
// IR-PCH-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-PCH-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP5]]
// IR-PCH-NEXT: br i1 [[CMP10]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]]
// IR-PCH: simd.if.then:
// IR-PCH-NEXT: store i64 0, ptr [[DOTOMP_IV]], align 8
// IR-PCH-NEXT: store i32 0, ptr [[Z13]], align 4
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// IR-PCH: omp.inner.for.cond:
// IR-PCH-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]]
// IR-PCH-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP7]], 1
// IR-PCH-NEXT: [[CMP14:%.*]] = icmp slt i64 [[TMP6]], [[ADD]]
// IR-PCH-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// IR-PCH: omp.inner.for.body:
// IR-PCH-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP9]], 0
// IR-PCH-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
// IR-PCH-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
// IR-PCH-NEXT: [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
// IR-PCH-NEXT: [[DIV19:%.*]] = sdiv i64 [[TMP8]], [[CONV18]]
// IR-PCH-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
// IR-PCH-NEXT: [[ADD21:%.*]] = add nsw i64 0, [[MUL20]]
// IR-PCH-NEXT: [[CONV22:%.*]] = trunc i64 [[ADD21]] to i32
// IR-PCH-NEXT: store i32 [[CONV22]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP12]], 0
// IR-PCH-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
// IR-PCH-NEXT: [[MUL25:%.*]] = mul nsw i32 1, [[DIV24]]
// IR-PCH-NEXT: [[CONV26:%.*]] = sext i32 [[MUL25]] to i64
// IR-PCH-NEXT: [[DIV27:%.*]] = sdiv i64 [[TMP11]], [[CONV26]]
// IR-PCH-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP13]], 0
// IR-PCH-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1
// IR-PCH-NEXT: [[MUL30:%.*]] = mul nsw i32 1, [[DIV29]]
// IR-PCH-NEXT: [[CONV31:%.*]] = sext i32 [[MUL30]] to i64
// IR-PCH-NEXT: [[MUL32:%.*]] = mul nsw i64 [[DIV27]], [[CONV31]]
// IR-PCH-NEXT: [[SUB33:%.*]] = sub nsw i64 [[TMP10]], [[MUL32]]
// IR-PCH-NEXT: [[MUL34:%.*]] = mul nsw i64 [[SUB33]], 1
// IR-PCH-NEXT: [[ADD35:%.*]] = add nsw i64 0, [[MUL34]]
// IR-PCH-NEXT: [[CONV36:%.*]] = trunc i64 [[ADD35]] to i32
// IR-PCH-NEXT: store i32 [[CONV36]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP14:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[TMP15:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[ADD37:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// IR-PCH-NEXT: [[TMP16:%.*]] = load i32, ptr [[Z13]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP16]], [[ADD37]]
// IR-PCH-NEXT: store i32 [[ADD38]], ptr [[Z13]], align 4, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// IR-PCH: omp.body.continue:
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// IR-PCH: omp.inner.for.inc:
// IR-PCH-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP17]], 1
// IR-PCH-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]]
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// IR-PCH: omp.inner.for.end:
// IR-PCH-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// IR-PCH-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP18]], 0
// IR-PCH-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// IR-PCH-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1
// IR-PCH-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
// IR-PCH-NEXT: store i32 [[ADD43]], ptr [[I11]], align 4
// IR-PCH-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// IR-PCH-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP19]], 0
// IR-PCH-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// IR-PCH-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
// IR-PCH-NEXT: [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
// IR-PCH-NEXT: store i32 [[ADD47]], ptr [[J]], align 4
// IR-PCH-NEXT: [[TMP20:%.*]] = load i32, ptr [[Z]], align 4
// IR-PCH-NEXT: [[TMP21:%.*]] = load i32, ptr [[Z13]], align 4
// IR-PCH-NEXT: [[ADD48:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// IR-PCH-NEXT: store i32 [[ADD48]], ptr [[Z]], align 4
// IR-PCH-NEXT: br label [[SIMD_IF_END]]
// IR-PCH: simd.if.end:
// IR-PCH-NEXT: ret void