1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
2 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=amdgpu-promote-alloca < %s | FileCheck %s
4 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
; Slot 0 of the [3 x i64] alloca is stored and reloaded repeatedly inside the
; loop. The CHECK lines show the alloca promoted to a <3 x i64> SSA value:
; stores become insertelement, loads become extractelement, and the value is
; carried across blocks by [[PROMOTEALLOCA]] phis instead of memory.
6 define amdgpu_kernel void @test_overwrite(i64 %val, i1 %cond) {
7 ; CHECK-LABEL: define amdgpu_kernel void @test_overwrite
8 ; CHECK-SAME: (i64 [[VAL:%.*]], i1 [[COND:%.*]]) {
10 ; CHECK-NEXT: br i1 [[COND]], label [[LOOP:%.*]], label [[END:%.*]]
12 ; CHECK-NEXT: [[PROMOTEALLOCA:%.*]] = phi <3 x i64> [ [[TMP2:%.*]], [[LOOP]] ], [ <i64 43, i64 undef, i64 undef>, [[ENTRY:%.*]] ]
13 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <3 x i64> [[PROMOTEALLOCA]], i32 0
14 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <3 x i64> [[PROMOTEALLOCA]], i64 68, i32 0
15 ; CHECK-NEXT: [[TMP2]] = insertelement <3 x i64> [[TMP1]], i64 32, i32 0
16 ; CHECK-NEXT: [[LOOP_CC:%.*]] = icmp ne i64 [[TMP0]], 68
17 ; CHECK-NEXT: br i1 [[LOOP_CC]], label [[LOOP]], label [[END]]
19 ; CHECK-NEXT: [[PROMOTEALLOCA1:%.*]] = phi <3 x i64> [ [[TMP2]], [[LOOP]] ], [ <i64 43, i64 undef, i64 undef>, [[ENTRY]] ]
20 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <3 x i64> [[PROMOTEALLOCA1]], i32 0
21 ; CHECK-NEXT: ret void
; Input IR: store 43 to slot 0, then in the loop load/overwrite slot 0 with
; 68 and then 32. Note %load.1 is forwarded to the constant 68 in the output.
24 %stack = alloca [3 x i64], align 4, addrspace(5)
25 store i64 43, ptr addrspace(5) %stack
26 br i1 %cond, label %loop, label %end
29 %load.0 = load i64, ptr addrspace(5) %stack
30 store i64 68, ptr addrspace(5) %stack
31 %load.1 = load i64, ptr addrspace(5) %stack
32 store i64 32, ptr addrspace(5) %stack
33 %loop.cc = icmp ne i64 %load.0, %load.1
34 br i1 %loop.cc, label %loop, label %end
37 %reload = load i64, ptr addrspace(5) %stack
; A full <4 x i64> store placed at element index 3 of a [4 x i64] alloca runs
; past the end of the allocation: only lane 0 of %arg lands in bounds (at
; index 3). The reload at index 2 also extends out of bounds, so after
; promotion the returned value folds to poison (see the final CHECK line).
41 define <4 x i64> @test_fullvec_out_of_bounds(<4 x i64> %arg) {
42 ; CHECK-LABEL: define <4 x i64> @test_fullvec_out_of_bounds
43 ; CHECK-SAME: (<4 x i64> [[ARG:%.*]]) {
45 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i64> [[ARG]], i64 0
46 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i64> undef, i64 [[TMP0]], i32 3
47 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[ARG]], i64 1
48 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[ARG]], i64 2
49 ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[ARG]], i64 3
50 ; CHECK-NEXT: ret <4 x i64> poison
53 %stack = alloca [4 x i64], align 4, addrspace(5)
54 %stack.2 = getelementptr inbounds [4 x i64], ptr addrspace(5) %stack, i32 0, i32 2
55 %stack.3 = getelementptr inbounds [4 x i64], ptr addrspace(5) %stack, i32 0, i32 3
56 store <4 x i64> %arg, ptr addrspace(5) %stack.3
57 %reload = load <4 x i64>, ptr addrspace(5) %stack.2
; Unlike @test_overwrite, the loop stores to element 1 while loading element
; 0, so the two slots stay distinct. The CHECK lines show promotion keeps
; both lanes live in the <3 x i64> phi: the loop inserts 32 at lane 1 and
; extracts lane 0, and the exit block extracts both lanes.
61 define amdgpu_kernel void @test_no_overwrite(i64 %val, i1 %cond) {
62 ; CHECK-LABEL: define amdgpu_kernel void @test_no_overwrite
63 ; CHECK-SAME: (i64 [[VAL:%.*]], i1 [[COND:%.*]]) {
65 ; CHECK-NEXT: br i1 [[COND]], label [[LOOP:%.*]], label [[END:%.*]]
67 ; CHECK-NEXT: [[PROMOTEALLOCA:%.*]] = phi <3 x i64> [ [[TMP1:%.*]], [[LOOP]] ], [ <i64 43, i64 undef, i64 undef>, [[ENTRY:%.*]] ]
68 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <3 x i64> [[PROMOTEALLOCA]], i32 0
69 ; CHECK-NEXT: [[TMP1]] = insertelement <3 x i64> [[PROMOTEALLOCA]], i64 32, i32 1
70 ; CHECK-NEXT: [[LOOP_CC:%.*]] = icmp ne i64 [[TMP0]], 32
71 ; CHECK-NEXT: br i1 [[LOOP_CC]], label [[LOOP]], label [[END]]
73 ; CHECK-NEXT: [[PROMOTEALLOCA1:%.*]] = phi <3 x i64> [ [[TMP1]], [[LOOP]] ], [ <i64 43, i64 undef, i64 undef>, [[ENTRY]] ]
74 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <3 x i64> [[PROMOTEALLOCA1]], i32 0
75 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <3 x i64> [[PROMOTEALLOCA1]], i32 1
76 ; CHECK-NEXT: ret void
79 %stack = alloca [3 x i64], align 4, addrspace(5)
80 %stack.1 = getelementptr inbounds i64, ptr addrspace(5) %stack, i32 1
81 store i64 43, ptr addrspace(5) %stack
82 br i1 %cond, label %loop, label %end
85 %load = load i64, ptr addrspace(5) %stack
86 store i64 32, ptr addrspace(5) %stack.1
87 %loop.cc = icmp ne i64 %load, 32
88 br i1 %loop.cc, label %loop, label %end
91 %reload = load i64, ptr addrspace(5) %stack
92 %reload.1 = load i64, ptr addrspace(5) %stack.1
; A 64-bit pointer stored/reloaded full-width through an [8 x i8] alloca.
; The CHECK lines show the pointer is converted with ptrtoint + bitcast into
; the promoted vector, and the reload is forwarded straight to [[ARG]].
96 define ptr @alloca_load_store_ptr64_full_ivec(ptr %arg) {
97 ; CHECK-LABEL: define ptr @alloca_load_store_ptr64_full_ivec
98 ; CHECK-SAME: (ptr [[ARG:%.*]]) {
100 ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
101 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP0]] to <8 x i8>
102 ; CHECK-NEXT: ret ptr [[ARG]]
105 %alloca = alloca [8 x i8], align 8, addrspace(5)
106 store ptr %arg, ptr addrspace(5) %alloca, align 8
107 %tmp = load ptr, ptr addrspace(5) %alloca, align 8
; Same pattern as the ptr64 test above but with a 32-bit addrspace(3)
; pointer and a [4 x i8] alloca: promoted via ptrtoint to i32 + bitcast,
; with the reload forwarded to [[ARG]].
111 define ptr addrspace(3) @alloca_load_store_ptr32_full_ivec(ptr addrspace(3) %arg) {
112 ; CHECK-LABEL: define ptr addrspace(3) @alloca_load_store_ptr32_full_ivec
113 ; CHECK-SAME: (ptr addrspace(3) [[ARG:%.*]]) {
115 ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr addrspace(3) [[ARG]] to i32
116 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP0]] to <4 x i8>
117 ; CHECK-NEXT: ret ptr addrspace(3) [[ARG]]
120 %alloca = alloca [4 x i8], align 8, addrspace(5)
121 store ptr addrspace(3) %arg, ptr addrspace(5) %alloca, align 8
122 %tmp = load ptr addrspace(3), ptr addrspace(5) %alloca, align 8
123 ret ptr addrspace(3) %tmp
; Stores a <2 x ptr> (2 x 64-bit) and reloads it as <4 x ptr addrspace(3)>
; (4 x 32-bit) through a [4 x i32] alloca — same total width, mixed pointer
; types. The CHECK lines show promotion chains ptrtoint -> bitcast ->
; inttoptr to reinterpret the bits without touching memory.
126 define <4 x ptr addrspace(3)> @alloca_load_store_ptr_mixed_full_ptrvec(<2 x ptr> %arg) {
127 ; CHECK-LABEL: define <4 x ptr addrspace(3)> @alloca_load_store_ptr_mixed_full_ptrvec
128 ; CHECK-SAME: (<2 x ptr> [[ARG:%.*]]) {
130 ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint <2 x ptr> [[ARG]] to <2 x i64>
131 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <4 x i32>
132 ; CHECK-NEXT: [[TMP2:%.*]] = inttoptr <4 x i32> [[TMP1]] to <4 x ptr addrspace(3)>
133 ; CHECK-NEXT: ret <4 x ptr addrspace(3)> [[TMP2]]
136 %alloca = alloca [4 x i32], align 8, addrspace(5)
137 store <2 x ptr> %arg, ptr addrspace(5) %alloca, align 8
138 %tmp = load <4 x ptr addrspace(3)>, ptr addrspace(5) %alloca, align 8
139 ret <4 x ptr addrspace(3)> %tmp
; Integer store/reload through an alloca whose element type is itself a
; pointer ([4 x ptr addrspace(5)]). The CHECK lines show the <2 x i64> input
; bitcast to <4 x i32>, inttoptr'd into the promoted pointer vector, then
; bitcast back out as the <8 x i16> result.
142 define <8 x i16> @ptralloca_load_store_ints_full(<2 x i64> %arg) {
143 ; CHECK-LABEL: define <8 x i16> @ptralloca_load_store_ints_full
144 ; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
146 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[ARG]] to <4 x i32>
147 ; CHECK-NEXT: [[TMP1:%.*]] = inttoptr <4 x i32> [[TMP0]] to <4 x ptr addrspace(5)>
148 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <8 x i16>
149 ; CHECK-NEXT: ret <8 x i16> [[TMP2]]
152 %stack = alloca [4 x ptr addrspace(5)], align 4, addrspace(5)
153 store <2 x i64> %arg, ptr addrspace(5) %stack
154 %reload = load <8 x i16>, ptr addrspace(5) %stack
155 ret <8 x i16> %reload
; Partial-width pointer-vector accesses into an [8 x i32] alloca: a
; <2 x ptr addrspace(3)> store covers only lanes 0-1, then both a 2-element
; and a 4-element pointer vector are reloaded. The CHECK lines show the
; store lowered to per-lane insertelement into the promoted <8 x i32>, and
; each reload rebuilt lane-by-lane and inttoptr'd back; the 4-element reload
; picks up undef in the never-written lanes 2-3.
158 define void @alloca_load_store_ptr_mixed_ptrvec(<2 x ptr addrspace(3)> %arg) {
159 ; CHECK-LABEL: define void @alloca_load_store_ptr_mixed_ptrvec
160 ; CHECK-SAME: (<2 x ptr addrspace(3)> [[ARG:%.*]]) {
162 ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint <2 x ptr addrspace(3)> [[ARG]] to <2 x i32>
163 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i64 0
164 ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i32> undef, i32 [[TMP1]], i32 0
165 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP0]], i64 1
166 ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP2]], i32 [[TMP3]], i32 1
167 ; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[TMP1]], i64 0
168 ; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[TMP5]], i32 [[TMP3]], i64 1
169 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr <2 x i32> [[TMP6]] to <2 x ptr addrspace(3)>
170 ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
171 ; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP3]], i64 1
172 ; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 undef, i64 2
173 ; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 undef, i64 3
174 ; CHECK-NEXT: [[TMP12:%.*]] = inttoptr <4 x i32> [[TMP11]] to <4 x ptr addrspace(3)>
175 ; CHECK-NEXT: ret void
178 %alloca = alloca [8 x i32], align 8, addrspace(5)
179 store <2 x ptr addrspace(3)> %arg, ptr addrspace(5) %alloca, align 8
180 %tmp = load <2 x ptr addrspace(3)>, ptr addrspace(5) %alloca, align 8
181 %tmp.full = load <4 x ptr addrspace(3)>, ptr addrspace(5) %alloca, align 8
185 ; Will not vectorize because we're accessing a 64-bit vector with a 32-bit
; pointer: the [8 x i8] (64-bit) alloca is stored/loaded as a 32-bit
; ptr addrspace(3), so the access width does not match the alloca. The
; CHECK lines confirm the alloca, store, and load survive the pass unchanged.
186 define ptr addrspace(3) @alloca_load_store_ptr_mixed_full_ivec(ptr addrspace(3) %arg) {
187 ; CHECK-LABEL: define ptr addrspace(3) @alloca_load_store_ptr_mixed_full_ivec
188 ; CHECK-SAME: (ptr addrspace(3) [[ARG:%.*]]) {
190 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [8 x i8], align 8, addrspace(5)
191 ; CHECK-NEXT: store ptr addrspace(3) [[ARG]], ptr addrspace(5) [[ALLOCA]], align 8
192 ; CHECK-NEXT: [[TMP:%.*]] = load ptr addrspace(3), ptr addrspace(5) [[ALLOCA]], align 8
193 ; CHECK-NEXT: ret ptr addrspace(3) [[TMP]]
196 %alloca = alloca [8 x i8], align 8, addrspace(5)
197 store ptr addrspace(3) %arg, ptr addrspace(5) %alloca, align 8
198 %tmp = load ptr addrspace(3), ptr addrspace(5) %alloca, align 8
199 ret ptr addrspace(3) %tmp