; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; This should end with a no-op sequence of exec mask manipulations.
; The mask should be back in its original state after the unreachable block executes.
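;
; For orientation, a minimal sketch of the save/restore idiom the checks below
; look for (register numbers are illustrative placeholders, not checked values):
;   s_and_saveexec_b64 s[0:1], vcc   ; copy exec into s[0:1], then exec &= vcc
;   ...                              ; divergent region (the unreachable block)
;   s_or_b64 exec, exec, s[0:1]      ; OR the saved lanes back in, restoring exec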

; GCN-LABEL: {{^}}uniform_br_trivial_ret_divergent_br_trivial_unreachable:
; GCN: s_cbranch_scc1 [[RET_BB:BB[0-9]+_[0-9]+]]

; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]

; GCN: BB{{[0-9]+_[0-9]+}}: ; %unreachable.bb
; GCN-NEXT: ; divergent unreachable

; GCN-NEXT: {{^}}[[FLOW]]: ; %Flow
; GCN-NEXT: s_or_b64 exec, exec

; GCN-NEXT: [[RET_BB]]:

; GCN-NEXT: .Lfunc_end0
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <4 x i32>] addrspace(4)* inreg %arg, [17 x <4 x i32>] addrspace(4)* inreg %arg1, [17 x <8 x i32>] addrspace(4)* inreg %arg2, i32 addrspace(4)* inreg %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
  %i.i = extractelement <2 x i32> %arg7, i32 0
  %j.i = extractelement <2 x i32> %arg7, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
  %p2 = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
  %p87 = fmul float %p2, %p2
  %p88 = fadd float %p87, %p87
  %p93 = fadd float %p88, %p88
  %p97 = fmul float %p93, %p93
  %p102 = fsub float %p97, %p97
  %p104 = fmul float %p102, %p102
  %p106 = fadd float 0.000000e+00, %p104
  %p108 = fadd float %p106, %p106
  %uniform.cond = icmp slt i32 %arg17, 0
  br i1 %uniform.cond, label %ret.bb, label %else

else:                                             ; preds = %main_body
  %p124 = fmul float %p108, %p108
  %p125 = fsub float %p124, %p124
  %divergent.cond = fcmp olt float %p125, 0.000000e+00
  br i1 %divergent.cond, label %ret.bb, label %unreachable.bb

unreachable.bb:                                   ; preds = %else
  unreachable

ret.bb:                                           ; preds = %else, %main_body
  ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
}
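
; The second test repeats the same control flow, but the unreachable and return
; blocks each contain a volatile store, so neither block is trivially empty.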

; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable:
; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]]

; GCN: ; %bb.{{[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %unreachable.bb

; GCN: ; divergent unreachable

; GCN: ; %UnifiedReturnBlock
; GCN-NEXT: s_or_b64 exec, exec

; GCN-NEXT: .Lfunc_end
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <4 x i32>] addrspace(4)* inreg %arg, [17 x <4 x i32>] addrspace(4)* inreg %arg1, [17 x <8 x i32>] addrspace(4)* inreg %arg2, i32 addrspace(4)* inreg %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
  %i.i = extractelement <2 x i32> %arg7, i32 0
  %j.i = extractelement <2 x i32> %arg7, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
  %p2 = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
  %p87 = fmul float %p2, %p2
  %p88 = fadd float %p87, %p87
  %p93 = fadd float %p88, %p88
  %p97 = fmul float %p93, %p93
  %p102 = fsub float %p97, %p97
  %p104 = fmul float %p102, %p102
  %p106 = fadd float 0.000000e+00, %p104
  %p108 = fadd float %p106, %p106
  %uniform.cond = icmp slt i32 %arg18, 0
  br i1 %uniform.cond, label %ret.bb, label %else

else:                                             ; preds = %main_body
  %p124 = fmul float %p108, %p108
  %p125 = fsub float %p124, %p124
  %divergent.cond = fcmp olt float %p125, 0.000000e+00
  br i1 %divergent.cond, label %ret.bb, label %unreachable.bb

unreachable.bb:                                   ; preds = %else
  store volatile i32 8, i32 addrspace(3)* undef
  unreachable

ret.bb:                                           ; preds = %else, %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
}

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #1

; Function Attrs: nounwind readnone
declare float @llvm.sqrt.f32(float) #1

; Function Attrs: nounwind readnone
declare float @llvm.floor.f32(float) #1

attributes #0 = { "InitialPSInputAddr"="36983" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }