1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
2 ; RUN: llc -march=amdgcn -verify-machineinstrs -O0 < %s
4 ; GCN-LABEL: {{^}}test_loop:
5 ; GCN: s_and_b64 vcc, exec, -1
6 ; GCN: [[LABEL:BB[0-9]+_[0-9]+]]: ; %for.body{{$}}
9 ; GCN: s_cbranch_vccnz [[LABEL]]
11 define amdgpu_kernel void @test_loop(float addrspace(3)* %ptr, i32 %n) nounwind {
; Entry block (implicit label): bypass the loop entirely when %n == -1,
; otherwise fall through into %for.body. The CHECK lines above expect the
; backend to turn the uniform loop condition into "s_and_b64 vcc, exec, -1"
; followed by s_cbranch_vccnz back to the loop header.
13 %cmp = icmp eq i32 %n, -1
14 br i1 %cmp, label %for.exit, label %for.body
; Loop body: %indvar counts up from 0; each iteration loads the LDS
; (addrspace(3)) float at %ptr[%indvar + 32], adds 1.0, and stores it back.
; NOTE(review): the %for.body: label, the loop back-branch, the %for.exit:
; block, and the closing '}' are not visible in this chunk -- interior lines
; appear to have been dropped from the paste; confirm against the original
; test file before relying on this function's shape.
20 %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
21 %tmp = add i32 %indvar, 32
22 %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
23 %vecload = load float, float addrspace(3)* %arrayidx, align 4
24 %add = fadd float %vecload, 1.0
; Store alignment (8) deliberately differs from the load alignment (4) in
; the original test text -- do not "normalize" it.
25 store float %add, float addrspace(3)* %arrayidx, align 8
26 %inc = add i32 %indvar, 1
30 ; GCN-LABEL: @loop_const_true
31 ; GCN: [[LABEL:BB[0-9]+_[0-9]+]]:
34 ; GCN: s_branch [[LABEL]]
35 define amdgpu_kernel void @loop_const_true(float addrspace(3)* %ptr, i32 %n) nounwind {
; Infinite loop: the trailing "br i1 true" always returns to %for.body, so
; the CHECK lines above expect an unconditional s_branch back to the loop
; label. Body is identical to @test_loop's: load %ptr[%indvar + 32] from
; LDS, add 1.0, store back.
; NOTE(review): the entry block, the %for.body: label, the %for.exit:
; block, and the closing '}' are missing from this chunk -- lines were
; dropped from the paste; verify against the original test file.
43 %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
44 %tmp = add i32 %indvar, 32
45 %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
46 %vecload = load float, float addrspace(3)* %arrayidx, align 4
47 %add = fadd float %vecload, 1.0
; Mismatched load (4) / store (8) alignment is as written in the original.
48 store float %add, float addrspace(3)* %arrayidx, align 8
49 %inc = add i32 %indvar, 1
; Constant-true condition: loop never exits via this branch.
50 br i1 true, label %for.body, label %for.exit
53 ; GCN-LABEL: {{^}}loop_const_false:
56 define amdgpu_kernel void @loop_const_false(float addrspace(3)* %ptr, i32 %n) nounwind {
; Single-trip "loop": the trailing "br i1 false" always goes to %for.exit,
; so the body executes exactly once and the backend should emit no branch
; back to the header.
; NOTE(review): the entry block, the %for.body: label, the %for.exit:
; block, and the closing '}' are missing from this chunk -- lines were
; dropped from the paste; verify against the original test file.
63 ; XXX - Should there be an S_ENDPGM?
65 %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
66 %tmp = add i32 %indvar, 32
67 %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
68 %vecload = load float, float addrspace(3)* %arrayidx, align 4
69 %add = fadd float %vecload, 1.0
; Mismatched load (4) / store (8) alignment is as written in the original.
70 store float %add, float addrspace(3)* %arrayidx, align 8
71 %inc = add i32 %indvar, 1
; Constant-false condition: falls through to %for.exit on the first pass.
72 br i1 false, label %for.body, label %for.exit
75 ; GCN-LABEL: {{^}}loop_const_undef:
78 define amdgpu_kernel void @loop_const_undef(float addrspace(3)* %ptr, i32 %n) nounwind {
; Loop with an undef condition: "br i1 undef" lets the backend pick either
; successor, so this test pins down what codegen does for an undefined
; uniform branch.
; NOTE(review): the entry block, the %for.body: label, the %for.exit:
; block, and the closing '}' are missing from this chunk -- lines were
; dropped from the paste; verify against the original test file.
85 ; XXX - Should there be an s_endpgm?
87 %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
88 %tmp = add i32 %indvar, 32
89 %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
90 %vecload = load float, float addrspace(3)* %arrayidx, align 4
91 %add = fadd float %vecload, 1.0
; Mismatched load (4) / store (8) alignment is as written in the original.
92 store float %add, float addrspace(3)* %arrayidx, align 8
93 %inc = add i32 %indvar, 1
; Undef condition: either successor is a legal lowering.
94 br i1 undef, label %for.body, label %for.exit
97 ; GCN-LABEL: {{^}}loop_arg_0:
98 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
99 ; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
101 ; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]
102 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
103 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
105 ; GCN: s_cbranch_vccnz [[LOOPBB]]
108 define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n) nounwind {
; Loop whose condition is an i1 loaded (volatile) from LDS address null
; before the loop; the value is loop-invariant, so the CHECK lines above
; expect it to be materialized once (v_and_b32/v_cmp_eq_u32) and the loop
; to close with s_cbranch_vccnz.
110 %cond = load volatile i1, i1 addrspace(3)* null
; NOTE(review): the %for.body: label, the %for.exit: block, and the
; closing '}' are missing from this chunk -- lines were dropped from the
; paste; verify against the original test file.
117 %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
118 %tmp = add i32 %indvar, 32
119 %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
120 %vecload = load float, float addrspace(3)* %arrayidx, align 4
121 %add = fadd float %vecload, 1.0
; Mismatched load (4) / store (8) alignment is as written in the original.
122 store float %add, float addrspace(3)* %arrayidx, align 8
123 %inc = add i32 %indvar, 1
; Loop-invariant condition read from LDS; branches back while %cond is true.
124 br i1 %cond, label %for.body, label %for.exit