; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
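
; Check that a branch on the negation of a condition is folded straight into
; vcc with s_andn2_b64, with no v_cndmask_b32/v_cmp pair emitted to
; rematerialize the inverted condition.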
; GCN-LABEL: {{^}}negated_cond:
; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
; GCN: .LBB0_1:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_cmp
; GCN: s_andn2_b64 vcc, exec, [[CC]]
; GCN: s_cbranch_vccnz .LBB0_2
define amdgpu_kernel void @negated_cond(i32 addrspace(1)* %arg1) {
bb:
  br label %bb1

bb1:
  %tmp1 = load i32, i32 addrspace(1)* %arg1
  %tmp2 = icmp eq i32 %tmp1, 0
  br label %bb2

bb2:
  %tmp3 = phi i32 [ 0, %bb1 ], [ %tmp6, %bb4 ]
  %tmp4 = shl i32 %tmp3, 5
  br i1 %tmp2, label %bb3, label %bb4

bb3:
  %tmp5 = add i32 %tmp4, 1
  br label %bb4

bb4:
  %tmp6 = phi i32 [ %tmp5, %bb3 ], [ %tmp4, %bb2 ]
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp6
  store i32 0, i32 addrspace(1)* %gep
  %tmp7 = icmp eq i32 %tmp6, 32
  br i1 %tmp7, label %bb1, label %bb2
}
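
; Same check for the case where the compare dominates both arms of the
; branch: the saved condition should be copied into vcc with s_mov_b64
; rather than recomputed, and no v_cndmask_b32/v_cmp pair should appear
; inside the loop.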
; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
; GCN: s_cmp_lg_u32
; GCN: s_cselect_b64 [[CC1:[^,]+]], -1, 0
; GCN: s_branch [[BB1:.LBB[0-9]+_[0-9]+]]
; GCN: [[BB0:.LBB[0-9]+_[0-9]+]]:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_cmp
; GCN: [[BB1]]:
; GCN: s_mov_b64 vcc, [[CC1]]
; GCN: s_cbranch_vccz [[BB2:.LBB[0-9]+_[0-9]+]]
; GCN: s_mov_b64 vcc, exec
; GCN: s_cbranch_execnz [[BB0]]
; GCN: [[BB2]]:
define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
bb:
  br label %bb2

bb2:
  %tmp1 = load i32, i32 addrspace(1)* %arg1
  %tmp2 = icmp eq i32 %tmp1, 0
  br label %bb4

bb3:
  ret void

bb4:
  %tmp3 = phi i32 [ 0, %bb2 ], [ %tmp7, %bb7 ]
  %tmp4 = shl i32 %tmp3, 5
  br i1 %tmp2, label %bb5, label %bb6

bb5:
  %tmp5 = add i32 %tmp4, 1
  br label %bb7

bb6:
  %tmp6 = add i32 %tmp3, 1
  br label %bb7

bb7:
  %tmp7 = phi i32 [ %tmp5, %bb5 ], [ %tmp6, %bb6 ]
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp7
  store i32 0, i32 addrspace(1)* %gep
  %tmp8 = icmp eq i32 %tmp7, 32
  br i1 %tmp8, label %bb3, label %bb4
}