; RUN: opt -mtriple=amdgcn-- -passes='loop-unroll,simplifycfg,sroa' %s -S -o - | FileCheck %s
; RUN: opt -mtriple=r600-- -passes='loop-unroll,simplifycfg,sroa' %s -S -o - | FileCheck %s

target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
; This test contains a simple loop that initializes an array declared in
; private memory. We want to make sure these kinds of loops are always
; unrolled, because private memory is slow.

; CHECK-LABEL: @private_memory
; CHECK-NOT: alloca
; CHECK: store i32 5, ptr addrspace(1) %out
define amdgpu_kernel void @private_memory(ptr addrspace(1) %out) {
entry:
  %0 = alloca [32 x i32], addrspace(5)
  br label %loop.header

loop.header:
  %counter = phi i32 [0, %entry], [%inc, %loop.inc]
  br label %loop.body

loop.body:
  %ptr = getelementptr [32 x i32], ptr addrspace(5) %0, i32 0, i32 %counter
  store i32 %counter, ptr addrspace(5) %ptr
  br label %loop.inc

loop.inc:
  %inc = add i32 %counter, 1
  %1 = icmp sge i32 %counter, 32
  br i1 %1, label %exit, label %loop.header

exit:
  ; After full unroll + SROA the alloca is gone; the load of element 5
  ; folds to the constant 5 checked for above.
  %2 = getelementptr [32 x i32], ptr addrspace(5) %0, i32 0, i32 5
  %3 = load i32, ptr addrspace(5) %2
  store i32 %3, ptr addrspace(1) %out
  ret void
}
; Check that loop is unrolled for local memory references

; CHECK-LABEL: @local_memory
; CHECK: getelementptr i32, ptr addrspace(1) %out, i32 128
; CHECK-NEXT: store
; CHECK-NEXT: ret void
define amdgpu_kernel void @local_memory(ptr addrspace(1) %out, ptr addrspace(3) %lds) {
entry:
  br label %loop.header

loop.header:
  %counter = phi i32 [0, %entry], [%inc, %loop.inc]
  br label %loop.body

loop.body:
  %ptr_lds = getelementptr i32, ptr addrspace(3) %lds, i32 %counter
  %val = load i32, ptr addrspace(3) %ptr_lds
  %ptr_out = getelementptr i32, ptr addrspace(1) %out, i32 %counter
  store i32 %val, ptr addrspace(1) %ptr_out
  br label %loop.inc

loop.inc:
  %inc = add i32 %counter, 1
  ; Note: `sge` with the back-edge taken on false means the final trip
  ; stores to index 128, which the CHECK above relies on.
  %cond = icmp sge i32 %counter, 128
  br i1 %cond, label %exit, label %loop.header

exit:
  ret void
}
; Check that a loop with if inside completely unrolled to eliminate phi and if

; CHECK-LABEL: @unroll_for_if
; CHECK: entry:
; CHECK-NEXT: getelementptr
; CHECK-NEXT: store
; CHECK-NEXT: getelementptr
; CHECK-NEXT: store
; CHECK-NOT: br
define amdgpu_kernel void @unroll_for_if(ptr addrspace(5) %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i1 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  ; The store is skipped only on the first iteration (i1 == 0); after
  ; full unrolling the branch and phi must fold away entirely.
  %tobool = icmp eq i32 %i1, 0
  br i1 %tobool, label %for.inc, label %if.then

if.then:                                          ; preds = %for.body
  %0 = sext i32 %i1 to i64
  %arrayidx = getelementptr inbounds i32, ptr addrspace(5) %a, i64 %0
  store i32 0, ptr addrspace(5) %arrayidx, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %inc = add nuw nsw i32 %i1, 1
  %cmp = icmp ult i32 %inc, 38
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.inc
  ret void
}
; Check that runtime unroll is enabled for local memory references

; CHECK-LABEL: @local_memory_runtime
; CHECK: loop.header:
; CHECK: load i32, ptr addrspace(3)
; CHECK: load i32, ptr addrspace(3)
; CHECK: br i1
; CHECK: loop.header.epil
; CHECK: load i32, ptr addrspace(3)
; CHECK: ret void
define amdgpu_kernel void @local_memory_runtime(ptr addrspace(1) %out, ptr addrspace(3) %lds, i32 %n) {
entry:
  br label %loop.header

loop.header:
  %counter = phi i32 [0, %entry], [%inc, %loop.inc]
  br label %loop.body

loop.body:
  %ptr_lds = getelementptr i32, ptr addrspace(3) %lds, i32 %counter
  %val = load i32, ptr addrspace(3) %ptr_lds
  %ptr_out = getelementptr i32, ptr addrspace(1) %out, i32 %counter
  store i32 %val, ptr addrspace(1) %ptr_out
  br label %loop.inc

loop.inc:
  %inc = add i32 %counter, 1
  ; Trip count depends on the runtime value %n, so only runtime
  ; unrolling (unrolled body + epilogue loop) is possible here.
  %cond = icmp sge i32 %counter, %n
  br i1 %cond, label %exit, label %loop.header

exit:
  ret void
}