# RUN: llc -mtriple=amdgcn-- -run-pass=print-machine-uniformity -o - %s 2>&1 | FileCheck %s
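# The workitem id makes the compare in bb.1 divergent; that divergence must
# propagate through the wave-level control-flow intrinsics and the G_PHIs at
# the join blocks, including phis whose incoming values are all uniform.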
# CHECK-LABEL: MachineUniformityInfo for function: hidden_diverge
# CHECK-LABEL: BLOCK bb.0
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.workitem.id.x)
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_ICMP intpred(slt)
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_XOR %{{[0-9]*}}:_, %{{[0-9]*}}:_
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.1
# CHECK: DIVERGENT: G_BR %bb.2
# CHECK-LABEL: BLOCK bb.1
# CHECK-LABEL: BLOCK bb.2
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.1, %{{[0-9]*}}:_(s32), %bb.0
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_PHI %{{[0-9]*}}:_(s1), %bb.1, %{{[0-9]*}}:_(s1), %bb.0
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.3
# CHECK: DIVERGENT: G_BR %bb.4
# CHECK-LABEL: BLOCK bb.3
# CHECK-LABEL: BLOCK bb.4
# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.2, %{{[0-9]*}}:_(s32), %bb.3

---
name:            hidden_diverge
tracksRegLiveness: true
body:             |
  bb.1:
    successors: %bb.2(0x40000000), %bb.3(0x40000000)
    liveins: $sgpr4_sgpr5

    %4:_(p4) = COPY $sgpr4_sgpr5
    %15:_(s32) = G_CONSTANT i32 0
    %17:_(s1) = G_CONSTANT i1 true
    %23:_(s32) = G_CONSTANT i32 1
    %30:_(s32) = G_CONSTANT i32 2
    %32:_(p1) = G_IMPLICIT_DEF
    %33:_(s32) = G_IMPLICIT_DEF
    %8:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
    %9:_(<3 x s32>) = G_LOAD %8(p4) :: (dereferenceable invariant load (<3 x s32>), align 16, addrspace 4)
    %10:_(s64) = G_CONSTANT i64 4
    %11:_(p4) = G_PTR_ADD %8, %10(s64)
    %12:_(s64) = G_CONSTANT i64 8
    %13:_(p4) = G_PTR_ADD %8, %12(s64)
    %14:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.workitem.id.x)
    %16:_(s1) = G_ICMP intpred(slt), %14(s32), %15
    %18:_(s1) = G_XOR %16, %17
    %19:_(s1), %20:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if), %16(s1)
    G_BRCOND %19(s1), %bb.2
    G_BR %bb.3

  bb.2:
    successors: %bb.3(0x80000000)

    %21:_(s32) = G_EXTRACT_VECTOR_ELT %9(<3 x s32>), %15(s32)
    %22:_(s32) = G_EXTRACT_VECTOR_ELT %9(<3 x s32>), %23(s32)
    %24:_(s1) = G_ICMP intpred(slt), %21(s32), %15

  bb.3:
    successors: %bb.4(0x40000000), %bb.5(0x40000000)

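    ; %25 merges two values that are themselves uniform; it is divergent only
    ; because this join is reached through the divergent branch in bb.1.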
    %25:_(s32) = G_PHI %22(s32), %bb.2, %33(s32), %bb.1
    %26:_(s1) = G_PHI %24(s1), %bb.2, %18(s1), %bb.1
    G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %20(s64)
    %27:_(s1), %28:_(s64) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if), %26(s1)
    G_BRCOND %27(s1), %bb.4
    G_BR %bb.5

  bb.4:
    successors: %bb.5(0x80000000)

    %29:_(s32) = G_EXTRACT_VECTOR_ELT %9(<3 x s32>), %30(s32)

  bb.5:
    %31:_(s32) = G_PHI %25(s32), %bb.3, %29(s32), %bb.4
    G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %28(s64)
    G_STORE %31(s32), %32(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
    S_ENDPGM 0

...