; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-4,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT4,ELT4-ALIGNED,ALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-ALIGNED,ALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-ALIGNED,ALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-4,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT4,ELT4-UNALIGNED,UNALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-UNALIGNED,UNALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-UNALIGNED,UNALIGNED,ALL %s

target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
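
; The functions below check how the LoadStoreVectorizer merges adjacent stores
; to the private (scratch, addrspace(5)) address space. The ELT4/ELT8/ELT16
; prefixes match the max-private-element-size-N feature in the RUN lines, and
; the ALIGNED/UNALIGNED prefixes match whether unaligned-scratch-access is
; enabled; these limit how wide a merged private store may be.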

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32
; ELT4-ALIGNED: store i32
; ELT4-ALIGNED: store i32
; ELT4-ALIGNED: store i32
; ELT4-ALIGNED: store i32

; ELT8: store <2 x i32>
; ELT8: store <2 x i32>

; ELT16-UNALIGNED: store <4 x i32>
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32(i32 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
  %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3

  store i32 9, i32 addrspace(5)* %out
  store i32 1, i32 addrspace(5)* %out.gep.1
  store i32 23, i32 addrspace(5)* %out.gep.2
  store i32 19, i32 addrspace(5)* %out.gep.3
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align1(
; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 1
; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 1
; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 1
; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 1

; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 1

; ELT8-UNALIGNED: store <2 x i32> <i32 9, i32 1>, <2 x i32> addrspace(5)* %1, align 1
; ELT8-UNALIGNED: store <2 x i32> <i32 23, i32 19>, <2 x i32> addrspace(5)* %2, align 1

; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(i32 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
  %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3

  store i32 9, i32 addrspace(5)* %out, align 1
  store i32 1, i32 addrspace(5)* %out.gep.1, align 1
  store i32 23, i32 addrspace(5)* %out.gep.2, align 1
  store i32 19, i32 addrspace(5)* %out.gep.3, align 1
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2(
; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 2
; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 2
; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 2
; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 2

; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 2

; ELT8-UNALIGNED: store <2 x i32>
; ELT8-UNALIGNED: store <2 x i32>

; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
  %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3

  store i32 9, i32 addrspace(5)* %out, align 2
  store i32 1, i32 addrspace(5)* %out.gep.1, align 2
  store i32 23, i32 addrspace(5)* %out.gep.2, align 2
  store i32 19, i32 addrspace(5)* %out.gep.3, align 2
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8(
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8(i8 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i32 2
  %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i32 3

  store i8 9, i8 addrspace(5)* %out, align 4
  store i8 1, i8 addrspace(5)* %out.gep.1
  store i8 23, i8 addrspace(5)* %out.gep.2
  store i8 19, i8 addrspace(5)* %out.gep.3
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8_align1(
; UNALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8> addrspace(5)* %1, align 1
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8_align1(i8 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i32 2
  %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i32 3

  store i8 9, i8 addrspace(5)* %out, align 1
  store i8 1, i8 addrspace(5)* %out.gep.1, align 1
  store i8 23, i8 addrspace(5)* %out.gep.2, align 1
  store i8 19, i8 addrspace(5)* %out.gep.3, align 1
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16(
; ALL: store <2 x i16>
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1

  store i16 9, i16 addrspace(5)* %out, align 4
  store i16 12, i16 addrspace(5)* %out.gep.1
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align2(
; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 2
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1

  store i16 9, i16 addrspace(5)* %out, align 2
  store i16 12, i16 addrspace(5)* %out.gep.1, align 2
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align1(
; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 1
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align1(i16 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1

  store i16 9, i16 addrspace(5)* %out, align 1
  store i16 12, i16 addrspace(5)* %out.gep.1, align 1
  ret void
}

; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align8(
; ALL: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 8
define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align8(i16 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1

  store i16 9, i16 addrspace(5)* %out, align 8
  store i16 12, i16 addrspace(5)* %out.gep.1, align 2
  ret void
}

; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i32
; ELT8: store <2 x i32>

; ELT16: store <3 x i32>
define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(i32 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2

  store i32 9, i32 addrspace(5)* %out
  store i32 1, i32 addrspace(5)* %out.gep.1
  store i32 23, i32 addrspace(5)* %out.gep.2
  ret void
}

; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i32_align1(
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32

; ELT8-UNALIGNED: store <2 x i32>
; ELT8-UNALIGNED: store i32

; ELT16-UNALIGNED: store <3 x i32>
define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(i32 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
  %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2

  store i32 9, i32 addrspace(5)* %out, align 1
  store i32 1, i32 addrspace(5)* %out.gep.1, align 1
  store i32 23, i32 addrspace(5)* %out.gep.2, align 1
  ret void
}

; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i8_align1(
; UNALIGNED: store <3 x i8>
define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i8_align1(i8 addrspace(5)* %out) #0 {
  %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i8 1
  %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i8 2

  store i8 9, i8 addrspace(5)* %out, align 1
  store i8 1, i8 addrspace(5)* %out.gep.1, align 1
  store i8 23, i8 addrspace(5)* %out.gep.2, align 1
  ret void
}

attributes #0 = { nounwind }