1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -S -mtriple=amdgcn-- -lowerswitch -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
3 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
5 ; Ensure two if.break calls, for both the inner and outer loops
6 ; FIXME: duplicate comparison
7 define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
8 ; OPT-LABEL: @multi_else_break(
10 ; OPT-NEXT: br label [[LOOP_OUTER:%.*]]
12 ; OPT-NEXT: [[PHI_BROKEN2:%.*]] = phi i64 [ [[TMP8:%.*]], [[FLOW1:%.*]] ], [ 0, [[MAIN_BODY:%.*]] ]
13 ; OPT-NEXT: [[TMP43:%.*]] = phi i32 [ 0, [[MAIN_BODY]] ], [ [[TMP3:%.*]], [[FLOW1]] ]
14 ; OPT-NEXT: br label [[LOOP:%.*]]
16 ; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP6:%.*]], [[FLOW:%.*]] ], [ 0, [[LOOP_OUTER]] ]
17 ; OPT-NEXT: [[TMP45:%.*]] = phi i32 [ [[TMP43]], [[LOOP_OUTER]] ], [ [[TMP3]], [[FLOW]] ]
18 ; OPT-NEXT: [[TMP48:%.*]] = icmp slt i32 [[TMP45]], [[UB:%.*]]
19 ; OPT-NEXT: [[TMP0:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP48]])
20 ; OPT-NEXT: [[TMP1:%.*]] = extractvalue { i1, i64 } [[TMP0]], 0
21 ; OPT-NEXT: [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP0]], 1
22 ; OPT-NEXT: br i1 [[TMP1]], label [[ENDIF:%.*]], label [[FLOW]]
24 ; OPT-NEXT: [[TMP3]] = phi i32 [ [[TMP47:%.*]], [[ENDIF]] ], [ undef, [[LOOP]] ]
25 ; OPT-NEXT: [[TMP4:%.*]] = phi i1 [ [[TMP51:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
26 ; OPT-NEXT: [[TMP5:%.*]] = phi i1 [ [[TMP51_INV:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
27 ; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
28 ; OPT-NEXT: [[TMP6]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP5]], i64 [[PHI_BROKEN]])
29 ; OPT-NEXT: [[TMP7:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP6]])
30 ; OPT-NEXT: br i1 [[TMP7]], label [[FLOW1]], label [[LOOP]]
32 ; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP6]])
33 ; OPT-NEXT: [[TMP8]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP4]], i64 [[PHI_BROKEN2]])
34 ; OPT-NEXT: [[TMP9:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP8]])
35 ; OPT-NEXT: br i1 [[TMP9]], label [[IF:%.*]], label [[LOOP_OUTER]]
37 ; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP8]])
40 ; OPT-NEXT: [[TMP47]] = add i32 [[TMP45]], 1
41 ; OPT-NEXT: [[TMP51]] = icmp eq i32 [[TMP47]], [[CONT:%.*]]
42 ; OPT-NEXT: [[TMP51_INV]] = xor i1 [[TMP51]], true
43 ; OPT-NEXT: br label [[FLOW]]
45 ; GCN-LABEL: multi_else_break:
46 ; GCN: ; %bb.0: ; %main_body
47 ; GCN-NEXT: s_mov_b64 s[0:1], 0
48 ; GCN-NEXT: v_mov_b32_e32 v0, 0
49 ; GCN-NEXT: s_branch .LBB0_2
50 ; GCN-NEXT: .LBB0_1: ; %loop.exit.guard
51 ; GCN-NEXT: ; in Loop: Header=BB0_2 Depth=1
52 ; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
53 ; GCN-NEXT: s_and_b64 s[2:3], exec, s[2:3]
54 ; GCN-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
55 ; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
56 ; GCN-NEXT: s_cbranch_execz .LBB0_6
57 ; GCN-NEXT: .LBB0_2: ; %LOOP.outer
58 ; GCN-NEXT: ; =>This Loop Header: Depth=1
59 ; GCN-NEXT: ; Child Loop BB0_4 Depth 2
60 ; GCN-NEXT: ; implicit-def: $sgpr2_sgpr3
61 ; GCN-NEXT: ; implicit-def: $sgpr8_sgpr9
62 ; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7
63 ; GCN-NEXT: s_mov_b64 s[4:5], 0
64 ; GCN-NEXT: s_branch .LBB0_4
65 ; GCN-NEXT: .LBB0_3: ; %Flow
66 ; GCN-NEXT: ; in Loop: Header=BB0_4 Depth=2
67 ; GCN-NEXT: s_or_b64 exec, exec, s[10:11]
68 ; GCN-NEXT: s_and_b64 s[10:11], exec, s[8:9]
69 ; GCN-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
70 ; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
71 ; GCN-NEXT: s_and_b64 s[10:11], s[6:7], exec
72 ; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
73 ; GCN-NEXT: s_andn2_b64 exec, exec, s[4:5]
74 ; GCN-NEXT: s_cbranch_execz .LBB0_1
75 ; GCN-NEXT: .LBB0_4: ; %LOOP
76 ; GCN-NEXT: ; Parent Loop BB0_2 Depth=1
77 ; GCN-NEXT: ; => This Inner Loop Header: Depth=2
78 ; GCN-NEXT: v_cmp_lt_i32_e32 vcc, v0, v4
79 ; GCN-NEXT: s_or_b64 s[6:7], s[6:7], exec
80 ; GCN-NEXT: s_or_b64 s[8:9], s[8:9], exec
81 ; GCN-NEXT: s_and_saveexec_b64 s[10:11], vcc
82 ; GCN-NEXT: s_cbranch_execz .LBB0_3
83 ; GCN-NEXT: ; %bb.5: ; %ENDIF
84 ; GCN-NEXT: ; in Loop: Header=BB0_4 Depth=2
85 ; GCN-NEXT: v_add_i32_e32 v0, vcc, 1, v0
86 ; GCN-NEXT: s_andn2_b64 s[6:7], s[6:7], exec
87 ; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v5, v0
88 ; GCN-NEXT: s_andn2_b64 s[8:9], s[8:9], exec
89 ; GCN-NEXT: s_and_b64 s[12:13], vcc, exec
90 ; GCN-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
91 ; GCN-NEXT: s_branch .LBB0_3
92 ; GCN-NEXT: .LBB0_6: ; %IF
97 LOOP.outer: ; preds = %ENDIF, %main_body
98 %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
101 LOOP: ; preds = %ENDIF, %LOOP.outer
102 %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
103 %tmp48 = icmp slt i32 %tmp45, %ub
104 br i1 %tmp48, label %ENDIF, label %IF
109 ENDIF: ; preds = %LOOP
110 %tmp47 = add i32 %tmp45, 1
111 %tmp51 = icmp eq i32 %tmp47, %cont
112 br i1 %tmp51, label %LOOP, label %LOOP.outer
115 define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
116 ; OPT-LABEL: @multi_if_break_loop(
; The switch in the test IR is lowered by -lowerswitch into the NodeBlock /
; LeafBlock compare chain below; every case may break out of the loop, and
; structurization funnels all break conditions into a single
; llvm.amdgcn.if.break (TMP4) feeding one llvm.amdgcn.loop in Flow4.
118 ; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
119 ; OPT-NEXT: [[TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
120 ; OPT-NEXT: br label [[BB1:%.*]]
122 ; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP4:%.*]], [[FLOW4:%.*]] ], [ 0, [[BB:%.*]] ]
123 ; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[TMP2:%.*]], [[FLOW4]] ]
124 ; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
125 ; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
126 ; OPT-NEXT: [[LOAD0:%.*]] = load volatile i32, ptr addrspace(1) undef, align 4
127 ; OPT-NEXT: br label [[NODEBLOCK:%.*]]
129 ; OPT-NEXT: [[PIVOT:%.*]] = icmp sge i32 [[LOAD0]], 1
130 ; OPT-NEXT: br i1 [[PIVOT]], label [[LEAFBLOCK1:%.*]], label [[FLOW:%.*]]
132 ; OPT-NEXT: [[SWITCHLEAF2:%.*]] = icmp eq i32 [[LOAD0]], 1
133 ; OPT-NEXT: br i1 [[SWITCHLEAF2]], label [[CASE1:%.*]], label [[FLOW3:%.*]]
135 ; OPT-NEXT: [[TMP0:%.*]] = phi i32 [ [[LSR_IV_NEXT]], [[CASE1]] ], [ undef, [[LEAFBLOCK1]] ]
136 ; OPT-NEXT: [[TMP1:%.*]] = phi i1 [ [[CMP2:%.*]], [[CASE1]] ], [ true, [[LEAFBLOCK1]] ]
137 ; OPT-NEXT: br label [[FLOW]]
139 ; OPT-NEXT: [[SWITCHLEAF:%.*]] = icmp eq i32 [[LOAD0]], 0
140 ; OPT-NEXT: br i1 [[SWITCHLEAF]], label [[CASE0:%.*]], label [[FLOW5:%.*]]
; Flow4: merged break condition TMP3 drives the one if.break/loop pair.
142 ; OPT-NEXT: [[TMP2]] = phi i32 [ [[TMP9:%.*]], [[FLOW5]] ], [ [[TMP6:%.*]], [[FLOW]] ]
143 ; OPT-NEXT: [[TMP3:%.*]] = phi i1 [ [[TMP10:%.*]], [[FLOW5]] ], [ [[TMP7:%.*]], [[FLOW]] ]
144 ; OPT-NEXT: [[TMP4]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP3]], i64 [[PHI_BROKEN]])
145 ; OPT-NEXT: [[TMP5:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP4]])
146 ; OPT-NEXT: br i1 [[TMP5]], label [[BB9:%.*]], label [[BB1]]
148 ; OPT-NEXT: [[LOAD1:%.*]] = load volatile i32, ptr addrspace(1) undef, align 4
149 ; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[TMP]], [[LOAD1]]
150 ; OPT-NEXT: br label [[FLOW5]]
152 ; OPT-NEXT: [[TMP6]] = phi i32 [ [[TMP0]], [[FLOW3]] ], [ undef, [[NODEBLOCK]] ]
153 ; OPT-NEXT: [[TMP7]] = phi i1 [ [[TMP1]], [[FLOW3]] ], [ true, [[NODEBLOCK]] ]
154 ; OPT-NEXT: [[TMP8:%.*]] = phi i1 [ false, [[FLOW3]] ], [ true, [[NODEBLOCK]] ]
155 ; OPT-NEXT: br i1 [[TMP8]], label [[LEAFBLOCK:%.*]], label [[FLOW4]]
157 ; OPT-NEXT: [[LOAD2:%.*]] = load volatile i32, ptr addrspace(1) undef, align 4
158 ; OPT-NEXT: [[CMP2]] = icmp sge i32 [[TMP]], [[LOAD2]]
159 ; OPT-NEXT: br label [[FLOW3]]
161 ; OPT-NEXT: [[TMP9]] = phi i32 [ [[LSR_IV_NEXT]], [[CASE0]] ], [ undef, [[LEAFBLOCK]] ]
162 ; OPT-NEXT: [[TMP10]] = phi i1 [ [[CMP1]], [[CASE0]] ], [ [[TMP7]], [[LEAFBLOCK]] ]
163 ; OPT-NEXT: br label [[FLOW4]]
165 ; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP4]])
; ISA: per-iteration break condition is built in s[4:5] and OR-ed into the
; loop exit mask s[0:1] in Flow4 (.LBB1_1); the loop ends when
; "s_andn2_b64 exec, exec, s[0:1]" leaves exec empty.
168 ; GCN-LABEL: multi_if_break_loop:
169 ; GCN: ; %bb.0: ; %bb
170 ; GCN-NEXT: s_load_dword s2, s[4:5], 0x9
171 ; GCN-NEXT: s_mov_b64 s[0:1], 0
172 ; GCN-NEXT: s_mov_b32 s3, 0xf000
173 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
174 ; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
175 ; GCN-NEXT: s_mov_b32 s2, -1
176 ; GCN-NEXT: s_branch .LBB1_2
177 ; GCN-NEXT: .LBB1_1: ; %Flow4
178 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
179 ; GCN-NEXT: s_and_b64 s[4:5], exec, s[4:5]
180 ; GCN-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
181 ; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
182 ; GCN-NEXT: s_cbranch_execz .LBB1_9
183 ; GCN-NEXT: .LBB1_2: ; %bb1
184 ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
185 ; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc
186 ; GCN-NEXT: s_waitcnt vmcnt(0)
187 ; GCN-NEXT: v_readfirstlane_b32 s8, v1
188 ; GCN-NEXT: s_mov_b64 s[4:5], -1
189 ; GCN-NEXT: s_cmp_lt_i32 s8, 1
190 ; GCN-NEXT: s_mov_b64 s[6:7], -1
191 ; GCN-NEXT: s_cbranch_scc1 .LBB1_6
192 ; GCN-NEXT: ; %bb.3: ; %LeafBlock1
193 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
194 ; GCN-NEXT: s_cmp_eq_u32 s8, 1
195 ; GCN-NEXT: s_cbranch_scc0 .LBB1_5
196 ; GCN-NEXT: ; %bb.4: ; %case1
197 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
198 ; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc
199 ; GCN-NEXT: s_waitcnt vmcnt(0)
200 ; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
201 ; GCN-NEXT: s_orn2_b64 s[4:5], vcc, exec
202 ; GCN-NEXT: .LBB1_5: ; %Flow3
203 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
204 ; GCN-NEXT: s_mov_b64 s[6:7], 0
205 ; GCN-NEXT: .LBB1_6: ; %Flow
206 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
207 ; GCN-NEXT: s_and_b64 vcc, exec, s[6:7]
208 ; GCN-NEXT: s_cbranch_vccz .LBB1_1
209 ; GCN-NEXT: ; %bb.7: ; %LeafBlock
210 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
211 ; GCN-NEXT: s_cmp_eq_u32 s8, 0
212 ; GCN-NEXT: s_cbranch_scc0 .LBB1_1
213 ; GCN-NEXT: ; %bb.8: ; %case0
214 ; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
215 ; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc
216 ; GCN-NEXT: s_waitcnt vmcnt(0)
217 ; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
218 ; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
219 ; GCN-NEXT: s_and_b64 s[6:7], vcc, exec
220 ; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
221 ; GCN-NEXT: s_branch .LBB1_1
222 ; GCN-NEXT: .LBB1_9: ; %bb9
; Test IR: each case continues to %bb1 while %tmp < the freshly loaded value
; (icmp slt; the checks show the inverted "icmp sge" break form after
; structurization) and breaks to %bb9 otherwise.
225 %id = call i32 @llvm.amdgcn.workitem.id.x()
226 %tmp = sub i32 %id, %arg
230 %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
231 %lsr.iv.next = add i32 %lsr.iv, 1
232 %cmp0 = icmp slt i32 %lsr.iv.next, 0
233 %load0 = load volatile i32, ptr addrspace(1) undef, align 4
234 switch i32 %load0, label %bb9 [
240 %load1 = load volatile i32, ptr addrspace(1) undef, align 4
241 %cmp1 = icmp slt i32 %tmp, %load1
242 br i1 %cmp1, label %bb1, label %bb9
245 %load2 = load volatile i32, ptr addrspace(1) undef, align 4
246 %cmp2 = icmp slt i32 %tmp, %load2
247 br i1 %cmp2, label %bb1, label %bb9
253 declare i32 @llvm.amdgcn.workitem.id.x() #1
255 attributes #0 = { nounwind }
256 attributes #1 = { nounwind readnone }