1 ; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
2 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
4 ; OPT-LABEL: {{^}}define amdgpu_vs void @multi_else_break(
8 ; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if.i64(
9 ; OPT: [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
13 ; Ensure two if.break calls, for both the inner and outer loops
15 ; OPT: call void @llvm.amdgcn.end.cf
16 ; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
17 ; OPT-NEXT: call i1 @llvm.amdgcn.loop.i64(i64
18 ; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
22 ; GCN-LABEL: {{^}}multi_else_break:
25 ; GCN: s_mov_b64 [[LEFT_OUTER:s\[[0-9]+:[0-9]+\]]], 0{{$}}
27 ; GCN: [[FLOW2:BB[0-9]+_[0-9]+]]: ; %Flow2
28 ; GCN: s_or_b64 exec, exec, [[TMP0:s\[[0-9]+:[0-9]+\]]]
29 ; GCN: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]], exec, [[BREAK_OUTER:s\[[0-9]+:[0-9]+\]]]
30 ; GCN: s_or_b64 [[TMP1]], [[TMP1]], [[LEFT_OUTER]]
31 ; GCN: s_mov_b64 [[LEFT_OUTER]], [[TMP1]]
32 ; GCN: s_andn2_b64 exec, exec, [[TMP1]]
33 ; GCN: s_cbranch_execz [[IF_BLOCK:BB[0-9]+_[0-9]+]]
35 ; GCN: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP.outer{{$}}
36 ; GCN: s_mov_b64 [[LEFT_INNER:s\[[0-9]+:[0-9]+\]]], 0{{$}}
39 ; GCN: s_or_b64 exec, exec, [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]]
40 ; GCN: s_and_b64 [[TMP0]], exec, [[BREAK_INNER:s\[[0-9]+:[0-9]+\]]]
41 ; GCN: s_or_b64 [[TMP0]], [[TMP0]], [[LEFT_INNER]]
42 ; GCN: s_mov_b64 [[LEFT_INNER]], [[TMP0]]
43 ; GCN: s_andn2_b64 exec, exec, [[TMP0]]
44 ; GCN: s_cbranch_execz [[FLOW2]]
46 ; GCN: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP{{$}}
47 ; GCN: s_and_saveexec_b64 [[SAVE_EXEC]], vcc
49 ; FIXME: duplicate comparison
51 ; GCN-DAG: v_cmp_eq_u32_e32 vcc,
52 ; GCN-DAG: v_cmp_ne_u32_e64 [[TMP51NEG:s\[[0-9]+:[0-9]+\]]],
54 ; GCN: [[IF_BLOCK]]: ; %IF
; Nested-loop test: the inner loop (%LOOP) can either continue, break to the
; outer header (%LOOP.outer), or exit entirely (%IF). This shape requires the
; annotator to emit an if.break for each loop level, which the checks above
; verify.
; NOTE(review): the entry block (%main_body) and the %IF exit block fall
; outside this chunk — confirm the full control-flow graph in the complete file.
56 define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
60 LOOP.outer: ; preds = %ENDIF, %main_body
; Outer induction value: 0 on first entry, otherwise the last inner-loop
; counter carried out through %ENDIF.
61 %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
64 LOOP: ; preds = %ENDIF, %LOOP.outer
; Inner counter: seeded from the outer phi, incremented each inner iteration.
65 %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
66 %tmp47 = add i32 %tmp45, 1
; Stay in the loop nest while %tmp45 < %ub; otherwise leave to %IF.
67 %tmp48 = icmp slt i32 %tmp45, %ub
68 br i1 %tmp48, label %ENDIF, label %IF
73 ENDIF: ; preds = %LOOP
; Equal to %cont -> take the inner back-edge; otherwise break back to the
; outer loop header (the "multi else break" this test is named for).
74 %tmp51 = icmp eq i32 %tmp47, %cont
75 br i1 %tmp51, label %LOOP, label %LOOP.outer
78 ; OPT-LABEL: define amdgpu_kernel void @multi_if_break_loop(
79 ; OPT: llvm.amdgcn.if.break
80 ; OPT: llvm.amdgcn.loop
81 ; OPT: llvm.amdgcn.if.break
82 ; OPT: llvm.amdgcn.end.cf
84 ; GCN-LABEL: {{^}}multi_if_break_loop:
85 ; GCN: s_mov_b64 [[BROKEN_THREADS_MASK:s\[[0-9]+:[0-9]+\]]], 0{{$}}
87 ; GCN: [[LOOP:BB[0-9]+_[0-9]+]]: ; %Flow4
88 ; GCN: s_and_b64 [[BROKEN_THREADS_MASK]], exec, [[BROKEN_THREADS_MASK]]
89 ; GCN: s_or_b64 [[BROKEN_THREADS_MASK]], [[BROKEN_THREADS_MASK]], [[SAVED:s\[[0-9]+:[0-9]+\]]]
90 ; GCN: s_andn2_b64 exec, exec, [[BROKEN_THREADS_MASK]]
91 ; GCN-NEXT: s_cbranch_execz [[LOOP_EXIT:BB[0-9]+_[0-9]+]]
94 ; GCN: buffer_load_dword [[LOAD0:v[0-9]+]],
95 ; GCN: s_mov_b64 [[SAVED]], [[BROKEN_THREADS_MASK]]
98 ; GCN: v_cmp_eq_u32_e32 vcc, 1, [[LOAD0]]
99 ; GCN: s_and_b64 vcc, exec, vcc
100 ; GCN: s_cbranch_vccz [[FLOW:BB[0-9]+_[0-9]+]]
103 ; GCN: buffer_load_dword [[LOAD2:v[0-9]+]],
104 ; GCN: v_cmp_ge_i32_e32 vcc, {{v[0-9]+}}, [[LOAD2]]
105 ; GCN: s_orn2_b64 [[BROKEN_THREADS_MASK]], vcc, exec
106 ; GCN: BB1_{{[0-9]+}}:
107 ; GCN: s_mov_b64 [[FALSE_MASK:s\[[0-9]+:[0-9]+\]]], 0
108 ; GCN: s_and_b64 vcc, exec, [[FALSE_MASK]]
109 ; GCN: s_cbranch_vccz [[LOOP]]
112 ; GCN: v_cmp_eq_u32_e32 vcc, 0, [[LOAD0]]
113 ; GCN: s_and_b64 vcc, exec, vcc
114 ; GCN: s_cbranch_vccz [[LOOP]]
117 ; GCN: buffer_load_dword [[LOAD1:v[0-9]+]],
118 ; GCN-DAG: v_cmp_ge_i32_e32 vcc, {{v[0-9]+}}, [[LOAD1]]
119 ; GCN: s_andn2_b64 [[BROKEN_THREADS_MASK]], [[BROKEN_THREADS_MASK]], exec
120 ; GCN: s_and_b64 [[TMP_MASK:s\[[0-9]+:[0-9]+\]]], vcc, exec
121 ; GCN: s_or_b64 [[BROKEN_THREADS_MASK]], [[BROKEN_THREADS_MASK]], [[TMP_MASK]]
122 ; GCN: s_branch [[LOOP]]
124 ; GCN: [[LOOP_EXIT]]: ; %Flow6
125 ; GCN: s_or_b64 exec, exec, [[BROKEN_THREADS_MASK]]
; Single loop with multiple distinct break paths: a switch default plus two
; load-and-compare cases that each may break out, exercising repeated
; if.break emission for one loop (see the intrinsic checks above).
; NOTE(review): the switch case list, the %case0/%case1 block labels, %bb9,
; and the closing brace are outside this chunk — verify against the full file.
127 define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
129 %id = call i32 @llvm.amdgcn.workitem.id.x()
; Per-lane value; makes the break conditions divergent across the wavefront.
130 %tmp = sub i32 %id, %arg
; Loop-carried counter; starts undef, incremented by whichever case re-enters.
134 %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
135 %lsr.iv.next = add i32 %lsr.iv, 1
136 %cmp0 = icmp slt i32 %lsr.iv.next, 0
; Volatile load keeps the selector opaque so the switch cannot be folded.
137 %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
138 switch i32 %load0, label %bb9 [
; First case body: break (to %bb9) when %tmp >= %load1, else continue (%bb1).
144 %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
145 %cmp1 = icmp slt i32 %tmp, %load1
146 br i1 %cmp1, label %bb1, label %bb9
; Second case body: same shape as the first, with an independent load.
149 %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
150 %cmp2 = icmp slt i32 %tmp, %load2
151 br i1 %cmp2, label %bb1, label %bb9
; Intrinsic returning the workitem (thread) id along x, used to create
; wave-divergent values in the kernel above.
157 declare i32 @llvm.amdgcn.workitem.id.x() #1
; #0: applied to the kernel; #1: the intrinsic is a pure read-none function.
159 attributes #0 = { nounwind }
160 attributes #1 = { nounwind readnone }