// Check code generation
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR

// Check same results after serialization round-trip
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// placeholder for loop body code.
extern "C" void body(...) {}
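
// The IR checks below follow the lowering of a '#pragma omp unroll partial'
// nested inside a worksharing loop: the frontend precomputes the trip count,
// workshares the unrolled (factor 2) iterations via __kmpc_for_static_init_4u,
// and emits an inner loop of at most two iterations that is guarded against
// the original trip count.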
// IR-LABEL: define {{.*}}@func(
// IR-NEXT: [[ENTRY:.*]]:
// IR-NEXT: %[[START_ADDR:.+]] = alloca i32, align 4
// IR-NEXT: %[[END_ADDR:.+]] = alloca i32, align 4
// IR-NEXT: %[[STEP_ADDR:.+]] = alloca i32, align 4
// IR-NEXT: %[[I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_IV:.+]] = alloca i32, align 4
// IR-NEXT: %[[TMP:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_1:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_2:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_3:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_6:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_8:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTUNROLLED_IV_I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_LB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_UB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_STRIDE:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_IS_LAST:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTUNROLLED_IV_I12:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTUNROLL_INNER_IV_I:.+]] = alloca i32, align 4
// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @2)
// IR-NEXT: store i32 %[[START:.+]], ptr %[[START_ADDR]], align 4
// IR-NEXT: store i32 %[[END:.+]], ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[STEP:.+]], ptr %[[STEP_ADDR]], align 4
// IR-NEXT: %[[TMP1:.+]] = load i32, ptr %[[START_ADDR]], align 4
// IR-NEXT: store i32 %[[TMP1]], ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[TMP2:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[TMP2]], ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP3:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
// IR-NEXT: store i32 %[[TMP3]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: %[[TMP4:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP5:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[SUB:.+]] = sub i32 %[[TMP4]], %[[TMP5]]
// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[SUB]], 1
// IR-NEXT: %[[TMP6:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB4]], %[[TMP6]]
// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP7]]
// IR-NEXT: %[[SUB5:.+]] = sub i32 %[[DIV]], 1
// IR-NEXT: store i32 %[[SUB5]], ptr %[[DOTCAPTURE_EXPR_3]], align 4
// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
// IR-NEXT: %[[ADD7:.+]] = add i32 %[[TMP8]], 1
// IR-NEXT: store i32 %[[ADD7]], ptr %[[DOTCAPTURE_EXPR_6]], align 4
// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_6]], align 4
// IR-NEXT: %[[SUB9:.+]] = sub i32 %[[TMP9]], -1
// IR-NEXT: %[[DIV10:.+]] = udiv i32 %[[SUB9]], 2
// IR-NEXT: %[[SUB11:.+]] = sub i32 %[[DIV10]], 1
// IR-NEXT: store i32 %[[SUB11]], ptr %[[DOTCAPTURE_EXPR_8]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTUNROLLED_IV_I]], align 4
// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_6]], align 4
// IR-NEXT: %[[CMP:.+]] = icmp ult i32 0, %[[TMP10]]
// IR-NEXT: br i1 %[[CMP]], label %[[OMP_PRECOND_THEN:.+]], label %[[OMP_PRECOND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_THEN]]:
// IR-NEXT: store i32 0, ptr %[[DOTOMP_LB]], align 4
// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
// IR-NEXT: store i32 %[[TMP11]], ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr %[[DOTOMP_STRIDE]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTOMP_IS_LAST]], align 4
// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @1, i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: %[[TMP13:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
// IR-NEXT: %[[CMP13:.+]] = icmp ugt i32 %[[TMP12]], %[[TMP13]]
// IR-NEXT: br i1 %[[CMP13]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_TRUE]]:
// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
// IR-NEXT: br label %[[COND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_FALSE]]:
// IR-NEXT: %[[TMP15:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: br label %[[COND_END]]
// IR-EMPTY:
// IR-NEXT: [[COND_END]]:
// IR-NEXT: %[[COND:.+]] = phi i32 [ %[[TMP14]], %[[COND_TRUE]] ], [ %[[TMP15]], %[[COND_FALSE]] ]
// IR-NEXT: store i32 %[[COND]], ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: %[[TMP16:.+]] = load i32, ptr %[[DOTOMP_LB]], align 4
// IR-NEXT: store i32 %[[TMP16]], ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: br label %[[OMP_INNER_FOR_COND:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_COND]]:
// IR-NEXT: %[[TMP17:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[TMP18:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: %[[ADD14:.+]] = add i32 %[[TMP18]], 1
// IR-NEXT: %[[CMP15:.+]] = icmp ult i32 %[[TMP17]], %[[ADD14]]
// IR-NEXT: br i1 %[[CMP15]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_BODY]]:
// IR-NEXT: %[[TMP19:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[MUL:.+]] = mul i32 %[[TMP19]], 2
// IR-NEXT: %[[ADD16:.+]] = add i32 0, %[[MUL]]
// IR-NEXT: store i32 %[[ADD16]], ptr %[[DOTUNROLLED_IV_I12]], align 4
// IR-NEXT: %[[TMP20:.+]] = load i32, ptr %[[DOTUNROLLED_IV_I12]], align 4
// IR-NEXT: store i32 %[[TMP20]], ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: br label %[[FOR_COND:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_COND]]:
// IR-NEXT: %[[TMP21:.+]] = load i32, ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: %[[TMP22:.+]] = load i32, ptr %[[DOTUNROLLED_IV_I12]], align 4
// IR-NEXT: %[[ADD17:.+]] = add i32 %[[TMP22]], 2
// IR-NEXT: %[[CMP18:.+]] = icmp ult i32 %[[TMP21]], %[[ADD17]]
// IR-NEXT: br i1 %[[CMP18]], label %[[LAND_RHS:.+]], label %[[LAND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[LAND_RHS]]:
// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: %[[TMP24:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
// IR-NEXT: %[[ADD19:.+]] = add i32 %[[TMP24]], 1
// IR-NEXT: %[[CMP20:.+]] = icmp ult i32 %[[TMP23]], %[[ADD19]]
// IR-NEXT: br label %[[LAND_END]]
// IR-EMPTY:
// IR-NEXT: [[LAND_END]]:
// IR-NEXT: %[[TMP25:.+]] = phi i1 [ false, %[[FOR_COND]] ], [ %[[CMP20]], %[[LAND_RHS]] ]
// IR-NEXT: br i1 %[[TMP25]], label %[[FOR_BODY:.+]], label %[[FOR_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_BODY]]:
// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[TMP27:.+]] = load i32, ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: %[[TMP28:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: %[[MUL21:.+]] = mul i32 %[[TMP27]], %[[TMP28]]
// IR-NEXT: %[[ADD22:.+]] = add i32 %[[TMP26]], %[[MUL21]]
// IR-NEXT: store i32 %[[ADD22]], ptr %[[I]], align 4
// IR-NEXT: %[[TMP29:.+]] = load i32, ptr %[[START_ADDR]], align 4
// IR-NEXT: %[[TMP30:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: %[[TMP31:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
// IR-NEXT: %[[TMP32:.+]] = load i32, ptr %[[I]], align 4
// IR-NEXT: call void (...) @body(i32 noundef %[[TMP29]], i32 noundef %[[TMP30]], i32 noundef %[[TMP31]], i32 noundef %[[TMP32]])
// IR-NEXT: br label %[[FOR_INC:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_INC]]:
// IR-NEXT: %[[TMP33:.+]] = load i32, ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: %[[INC:.+]] = add i32 %[[TMP33]], 1
// IR-NEXT: store i32 %[[INC]], ptr %[[DOTUNROLL_INNER_IV_I]], align 4
// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP2:[0-9]+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_END]]:
// IR-NEXT: br label %[[OMP_BODY_CONTINUE:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_BODY_CONTINUE]]:
// IR-NEXT: br label %[[OMP_INNER_FOR_INC:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_INC]]:
// IR-NEXT: %[[TMP34:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[ADD23:.+]] = add i32 %[[TMP34]], 1
// IR-NEXT: store i32 %[[ADD23]], ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: br label %[[OMP_INNER_FOR_COND]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_END]]:
// IR-NEXT: br label %[[OMP_LOOP_EXIT:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_LOOP_EXIT]]:
// IR-NEXT: call void @__kmpc_for_static_fini(ptr @1, i32 %[[TMP0]])
// IR-NEXT: br label %[[OMP_PRECOND_END]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_END]]:
// IR-NEXT: call void @__kmpc_barrier(ptr @3, i32 %[[TMP0]])
// IR-NEXT: ret void
// IR-NEXT: }
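
// 'partial' without an argument leaves the unroll factor to the
// implementation; the checks above and the llvm.loop.unroll.count metadata
// below expect a factor of 2 for this configuration.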
extern "C" void func(int start, int end, int step) {
#pragma omp for
#pragma omp unroll partial
  for (int i = start; i < end; i += step)
    body(start, end, step, i);
}

#endif /* HEADER */
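
// Rough source-level sketch of the transformation the IR above checks
// (illustration only, not compiled as part of the test): the worksharing loop
// runs over the unrolled iteration space and the inner loop replays up to two
// of the original iterations, guarded against the original trip count
// (arithmetic is unsigned, mirroring the udiv/ult instructions above):
//
//   unsigned trip = (end - start - 1 + step) / step;        // original trip count
//   #pragma omp for
//   for (unsigned uiv = 0; uiv < (trip + 1) / 2; ++uiv)     // unrolled iterations
//     for (unsigned inner = uiv * 2; inner < uiv * 2 + 2 && inner < trip; ++inner)
//       body(start, end, step, start + inner * step);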
// IR: ![[LOOP2]] = distinct !{![[LOOP2]], ![[LOOPPROP3:[0-9]+]], ![[LOOPPROP4:[0-9]+]]}
// IR: ![[LOOPPROP3]] = !{!"llvm.loop.mustprogress"}
// IR: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.count", i32 2}
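
// Note: the frontend does not replicate the loop body itself; it restructures
// the loop into the nest checked above and tags the inner loop with the
// llvm.loop.unroll.count metadata, and LLVM's loop unroller performs the
// actual factor-2 unrolling later in the pipeline.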