1 ; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
2 ; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
4 ; FUNC-LABEL: {{^}}break_inserted_outside_of_loop:
6 ; SI: [[LOOP_LABEL:[A-Z0-9]+]]:
7 ; Lowered break instruction:
9 ; Lowered Loop instruction:
11 ; s_cbranch_execnz [[LOOP_LABEL]]
13 define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out, i32 %a) {
15 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
17 %1 = trunc i32 %0 to i1
21 store i32 0, i32 addrspace(1)* %out
25 br i1 %1, label %ENDLOOP, label %ENDIF
29 ; FUNC-LABEL: {{^}}phi_cond_outside_loop:
31 ; SI: s_mov_b64 [[LEFT:s\[[0-9]+:[0-9]+\]]], 0
32 ; SI: s_mov_b64 [[PHI:s\[[0-9]+:[0-9]+\]]], 0
35 ; SI: v_cmp_eq_u32_e64 [[TMP:s\[[0-9]+:[0-9]+\]]],
39 ; SI: [[LOOP_LABEL:BB[0-9]+_[0-9]+]]: ; %loop
40 ; SI: s_mov_b64 [[TMP:s\[[0-9]+:[0-9]+\]]], [[LEFT]]
41 ; SI: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]], exec, [[PHI]]
42 ; SI: s_or_b64 [[LEFT]], [[TMP1]], [[TMP]]
43 ; SI: s_andn2_b64 exec, exec, [[LEFT]]
44 ; SI: s_cbranch_execnz [[LOOP_LABEL]]
47 define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
49 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
50 %0 = icmp eq i32 %tid , 0
51 br i1 %0, label %if, label %else
57 %1 = icmp eq i32 %b, 0
61 %2 = phi i1 [0, %if], [%1, %else]
65 br i1 %2, label %exit, label %loop
71 ; FIXME: should emit s_endpgm
72 ; CHECK-LABEL: {{^}}switch_unreachable:
75 define amdgpu_kernel void @switch_unreachable(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
77 switch i32 %x, label %sw.default [
92 declare float @llvm.fabs.f32(float) nounwind readnone
94 ; This broke the old AMDIL cfg structurizer
95 ; FUNC-LABEL: {{^}}loop_land_info_assert:
96 ; SI: v_cmp_lt_i32_e64 [[CMP4:s\[[0-9:]+\]]], s{{[0-9]+}}, 4{{$}}
97 ; SI: s_and_b64 [[CMP4M:s\[[0-9]+:[0-9]+\]]], exec, [[CMP4]]
99 ; SI: [[WHILELOOP:BB[0-9]+_[0-9]+]]: ; %while.cond
100 ; SI: s_cbranch_vccz [[FOR_COND_PH:BB[0-9]+_[0-9]+]]
102 ; SI: [[CONVEX_EXIT:BB[0-9_]+]]
104 ; SI-NEXT: s_cbranch_vccnz [[ENDPGM:BB[0-9]+_[0-9]+]]
106 ; SI: s_cbranch_vccnz [[WHILELOOP]]
109 ; SI: buffer_store_dword
111 ; SI: [[FOR_COND_PH]]: ; %for.cond.preheader
112 ; SI: s_cbranch_vccz [[ENDPGM]]
116 define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32 %c3, i32 %x, i32 %y, i1 %arg) nounwind {
118 %cmp = icmp sgt i32 %c0, 0
119 br label %while.cond.outer
122 %tmp = load float, float addrspace(1)* undef
126 %cmp1 = icmp slt i32 %c1, 4
127 br i1 %cmp1, label %convex.exit, label %for.cond
130 %or = or i1 %cmp, %cmp1
131 br i1 %or, label %return, label %if.end
134 %tmp3 = call float @llvm.fabs.f32(float %tmp) nounwind readnone
135 %cmp2 = fcmp olt float %tmp3, 0x3E80000000000000
136 br i1 %cmp2, label %if.else, label %while.cond.outer
139 store volatile i32 3, i32 addrspace(1)* undef, align 4
143 %cmp3 = icmp slt i32 %c3, 1000
144 br i1 %cmp3, label %for.body, label %return
147 br i1 %cmp3, label %self.loop, label %if.end.2
150 %or.cond2 = or i1 %cmp3, %arg
151 br i1 %or.cond2, label %return, label %for.cond
160 declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
162 attributes #0 = { nounwind readnone }