; RUN: llc -march=amdgcn -mcpu=verde < %s | FileCheck %s

; Test a simple uniform loop that lives inside non-uniform control flow.
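;
; The divergent branch on %p should be lowered to the exec-mask save sequence
; (s_and_saveexec_b64 followed by s_cbranch_execz), while the back-edge of the
; uniform loop is expected to stay a scalar s_cbranch_vccz, as the CHECK lines
; below require.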
; CHECK-LABEL: {{^}}test1:
; CHECK: v_cmp_ne_u32_e32 vcc, 0
; CHECK: s_and_saveexec_b64
; CHECK-NEXT: ; mask branch
; CHECK-NEXT: s_cbranch_execz BB{{[0-9]+_[0-9]+}}
; CHECK-NEXT: BB{{[0-9]+_[0-9]+}}: ; %loop_body.preheader

; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]:
; CHECK: s_cbranch_vccz [[LOOP_BODY_LABEL]]

define amdgpu_ps void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) {
main_body:
  %cc = icmp eq i32 %p, 0
  br i1 %cc, label %out, label %loop_body

loop_body:
  %counter = phi i32 [ 0, %main_body ], [ %incr, %loop_body ]

  ; Prevent the loop from being optimized out
  call void asm sideeffect "", "" ()

  %incr = add i32 %counter, 1
  %lc = icmp sge i32 %incr, 1000
  br i1 %lc, label %out, label %loop_body

out:
  ret void
}
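
; test2 places the same kind of uniform loop inside a branch that is divergent
; on the workitem id, with the loop reachable from two predecessors (%if and
; %done0). The checks only require that the divergent branch is still guarded
; by the s_and_saveexec_b64 / s_cbranch_execz sequence.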
; CHECK-LABEL: {{^}}test2:
; CHECK: s_and_saveexec_b64
; CHECK-NEXT: ; mask branch
; CHECK-NEXT: s_cbranch_execz
define amdgpu_kernel void @test2(i32 addrspace(1)* %out, i32 %a, i32 %b) {
main_body:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %cc = icmp eq i32 %tid, 0
  br i1 %cc, label %done1, label %if

if:
  %cmp = icmp eq i32 %a, 0
  br i1 %cmp, label %done0, label %loop_body

loop_body:
  %counter = phi i32 [ 0, %if ], [ 0, %done0 ], [ %incr, %loop_body ]

  ; Prevent the loop from being optimized out
  call void asm sideeffect "", "" ()

  %incr = add i32 %counter, 1
  %lc = icmp sge i32 %incr, 1000
  br i1 %lc, label %done1, label %loop_body

done0:
  %cmp0 = icmp eq i32 %b, 0
  br i1 %cmp0, label %done1, label %loop_body

done1:
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #1 = { nounwind readonly }