// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +altivec -target-feature +vsx \
// RUN:   -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +altivec -target-feature +vsx \
// RUN:   -target-feature +power8-vector -triple powerpc64le-unknown-unknown \
// RUN:   -emit-llvm %s -o - | FileCheck %s -check-prefixes=CHECK,CHECK-P8
#include <altivec.h>
10 // CHECK-LABEL: @test1(
11 // CHECK-NEXT: entry:
12 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <8 x i16>, align 16
13 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
14 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
15 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
16 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
17 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
18 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
19 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
20 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
21 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
22 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
23 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
24 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
25 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
26 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
27 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
28 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
29 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
30 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
31 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
32 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
33 // CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr [[TMP4]], align 1
34 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
35 // CHECK-NEXT: store <8 x i16> [[TMP6]], ptr [[TMP7]], align 16
36 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
37 // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr [[TMP8]], align 16
38 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
39 // CHECK-NEXT: store <8 x i16> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
40 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
41 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
42 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
43 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
44 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
45 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
46 // CHECK-NEXT: [[TMP14:%.*]] = load <8 x i16>, ptr [[__VEC_ADDR_I]], align 16
47 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
48 // CHECK-NEXT: store <8 x i16> [[TMP14]], ptr [[TMP15]], align 1
49 // CHECK-NEXT: ret void
51 void test1(vector signed short *c, signed short *st, const signed short *ld) {
52 *c = vec_xl(3ll, ld);
53 vec_xst(*c, 7ll, st);
56 // CHECK-LABEL: @test2(
57 // CHECK-NEXT: entry:
58 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <8 x i16>, align 16
59 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
60 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
61 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
62 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
63 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
64 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
65 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
66 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
67 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
68 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
69 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
70 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
71 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
72 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
73 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
74 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
75 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
76 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
77 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
78 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
79 // CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr [[TMP4]], align 1
80 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
81 // CHECK-NEXT: store <8 x i16> [[TMP6]], ptr [[TMP7]], align 16
82 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
83 // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr [[TMP8]], align 16
84 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
85 // CHECK-NEXT: store <8 x i16> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
86 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
87 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
88 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
89 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
90 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
91 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
92 // CHECK-NEXT: [[TMP14:%.*]] = load <8 x i16>, ptr [[__VEC_ADDR_I]], align 16
93 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
94 // CHECK-NEXT: store <8 x i16> [[TMP14]], ptr [[TMP15]], align 1
95 // CHECK-NEXT: ret void
97 void test2(vector unsigned short *c, unsigned short *st,
98 const unsigned short *ld) {
99 *c = vec_xl(3ll, ld);
100 vec_xst(*c, 7ll, st);
103 // CHECK-LABEL: @test3(
104 // CHECK-NEXT: entry:
105 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <4 x i32>, align 16
106 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
107 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
108 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
109 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
110 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
111 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
112 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
113 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
114 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
115 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
116 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
117 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
118 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
119 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
120 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
121 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
122 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
123 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
124 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
125 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
126 // CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 1
127 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
128 // CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP7]], align 16
129 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
130 // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr [[TMP8]], align 16
131 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
132 // CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
133 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
134 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
135 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
136 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
137 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
138 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
139 // CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr [[__VEC_ADDR_I]], align 16
140 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
141 // CHECK-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 1
142 // CHECK-NEXT: ret void
144 void test3(vector signed int *c, signed int *st, const signed int *ld) {
145 *c = vec_xl(3ll, ld);
146 vec_xst(*c, 7ll, st);
149 // CHECK-LABEL: @test4(
150 // CHECK-NEXT: entry:
151 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <4 x i32>, align 16
152 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
153 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
154 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
155 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
156 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
157 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
158 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
159 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
160 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
161 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
162 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
163 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
164 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
165 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
166 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
167 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
168 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
169 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
170 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
171 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
172 // CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 1
173 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
174 // CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP7]], align 16
175 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
176 // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr [[TMP8]], align 16
177 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
178 // CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
179 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
180 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
181 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
182 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
183 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
184 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
185 // CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr [[__VEC_ADDR_I]], align 16
186 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
187 // CHECK-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 1
188 // CHECK-NEXT: ret void
190 void test4(vector unsigned int *c, unsigned int *st, const unsigned int *ld) {
191 *c = vec_xl(3ll, ld);
192 vec_xst(*c, 7ll, st);
195 // CHECK-LABEL: @test5(
196 // CHECK-NEXT: entry:
197 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <2 x i64>, align 16
198 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
199 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
200 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
201 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
202 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
203 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
204 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
205 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
206 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
207 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
208 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
209 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
210 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
211 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
212 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
213 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
214 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
215 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
216 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
217 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
218 // CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr [[TMP4]], align 1
219 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
220 // CHECK-NEXT: store <2 x i64> [[TMP6]], ptr [[TMP7]], align 16
221 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
222 // CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr [[TMP8]], align 16
223 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
224 // CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
225 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
226 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
227 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
228 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
229 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
230 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
231 // CHECK-NEXT: [[TMP14:%.*]] = load <2 x i64>, ptr [[__VEC_ADDR_I]], align 16
232 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
233 // CHECK-NEXT: store <2 x i64> [[TMP14]], ptr [[TMP15]], align 1
234 // CHECK-NEXT: ret void
236 void test5(vector signed long long *c, signed long long *st,
237 const signed long long *ld) {
238 *c = vec_xl(3ll, ld);
239 vec_xst(*c, 7ll, st);
242 // CHECK-LABEL: @test6(
243 // CHECK-NEXT: entry:
244 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <2 x i64>, align 16
245 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
246 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
247 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
248 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
249 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
250 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
251 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
252 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
253 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
254 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
255 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
256 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
257 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
258 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
259 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
260 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
261 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
262 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
263 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
264 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
265 // CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr [[TMP4]], align 1
266 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
267 // CHECK-NEXT: store <2 x i64> [[TMP6]], ptr [[TMP7]], align 16
268 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
269 // CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr [[TMP8]], align 16
270 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
271 // CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
272 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
273 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
274 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
275 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
276 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
277 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
278 // CHECK-NEXT: [[TMP14:%.*]] = load <2 x i64>, ptr [[__VEC_ADDR_I]], align 16
279 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
280 // CHECK-NEXT: store <2 x i64> [[TMP14]], ptr [[TMP15]], align 1
281 // CHECK-NEXT: ret void
283 void test6(vector unsigned long long *c, unsigned long long *st,
284 const unsigned long long *ld) {
285 *c = vec_xl(3ll, ld);
286 vec_xst(*c, 7ll, st);
289 // CHECK-LABEL: @test7(
290 // CHECK-NEXT: entry:
291 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <4 x float>, align 16
292 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
293 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
294 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
295 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
296 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
297 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
298 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
299 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
300 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
301 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
302 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
303 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
304 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
305 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
306 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
307 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
308 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
309 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
310 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
311 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
312 // CHECK-NEXT: [[TMP6:%.*]] = load <4 x float>, ptr [[TMP4]], align 1
313 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
314 // CHECK-NEXT: store <4 x float> [[TMP6]], ptr [[TMP7]], align 16
315 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
316 // CHECK-NEXT: [[TMP9:%.*]] = load <4 x float>, ptr [[TMP8]], align 16
317 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
318 // CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
319 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
320 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
321 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
322 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
323 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
324 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
325 // CHECK-NEXT: [[TMP14:%.*]] = load <4 x float>, ptr [[__VEC_ADDR_I]], align 16
326 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
327 // CHECK-NEXT: store <4 x float> [[TMP14]], ptr [[TMP15]], align 1
328 // CHECK-NEXT: ret void
330 void test7(vector float *c, float *st, const float *ld) {
331 *c = vec_xl(3ll, ld);
332 vec_xst(*c, 7ll, st);
335 // CHECK-LABEL: @test8(
336 // CHECK-NEXT: entry:
337 // CHECK-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <2 x double>, align 16
338 // CHECK-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
339 // CHECK-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
340 // CHECK-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
341 // CHECK-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
342 // CHECK-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
343 // CHECK-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
344 // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
345 // CHECK-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
346 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
347 // CHECK-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
348 // CHECK-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
349 // CHECK-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
350 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
351 // CHECK-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
352 // CHECK-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
353 // CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
354 // CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
355 // CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
356 // CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
357 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
358 // CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr [[TMP4]], align 1
359 // CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
360 // CHECK-NEXT: store <2 x double> [[TMP6]], ptr [[TMP7]], align 16
361 // CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
362 // CHECK-NEXT: [[TMP9:%.*]] = load <2 x double>, ptr [[TMP8]], align 16
363 // CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
364 // CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
365 // CHECK-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
366 // CHECK-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
367 // CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
368 // CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
369 // CHECK-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
370 // CHECK-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
371 // CHECK-NEXT: [[TMP14:%.*]] = load <2 x double>, ptr [[__VEC_ADDR_I]], align 16
372 // CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
373 // CHECK-NEXT: store <2 x double> [[TMP14]], ptr [[TMP15]], align 1
374 // CHECK-NEXT: ret void
376 void test8(vector double *c, double *st, const double *ld) {
377 *c = vec_xl(3ll, ld);
378 vec_xst(*c, 7ll, st);
381 #ifdef __POWER8_VECTOR__
382 // CHECK-P8-LABEL: @test9(
383 // CHECK-P8-NEXT: entry:
384 // CHECK-P8-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <1 x i128>, align 16
385 // CHECK-P8-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
386 // CHECK-P8-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
387 // CHECK-P8-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
388 // CHECK-P8-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
389 // CHECK-P8-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
390 // CHECK-P8-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
391 // CHECK-P8-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
392 // CHECK-P8-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
393 // CHECK-P8-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
394 // CHECK-P8-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
395 // CHECK-P8-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
396 // CHECK-P8-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
397 // CHECK-P8-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
398 // CHECK-P8-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
399 // CHECK-P8-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
400 // CHECK-P8-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
401 // CHECK-P8-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
402 // CHECK-P8-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
403 // CHECK-P8-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
404 // CHECK-P8-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
405 // CHECK-P8-NEXT: [[TMP6:%.*]] = load <1 x i128>, ptr [[TMP4]], align 1
406 // CHECK-P8-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
407 // CHECK-P8-NEXT: store <1 x i128> [[TMP6]], ptr [[TMP7]], align 16
408 // CHECK-P8-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
409 // CHECK-P8-NEXT: [[TMP9:%.*]] = load <1 x i128>, ptr [[TMP8]], align 16
410 // CHECK-P8-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
411 // CHECK-P8-NEXT: store <1 x i128> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
412 // CHECK-P8-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
413 // CHECK-P8-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
414 // CHECK-P8-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
415 // CHECK-P8-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
416 // CHECK-P8-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
417 // CHECK-P8-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
418 // CHECK-P8-NEXT: [[TMP14:%.*]] = load <1 x i128>, ptr [[__VEC_ADDR_I]], align 16
419 // CHECK-P8-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
420 // CHECK-P8-NEXT: store <1 x i128> [[TMP14]], ptr [[TMP15]], align 1
421 // CHECK-P8-NEXT: ret void
423 void test9(vector signed __int128 *c, signed __int128 *st,
424 const signed __int128 *ld) {
425 *c = vec_xl(3ll, ld);
426 vec_xst(*c, 7ll, st);
429 // CHECK-P8-LABEL: @test10(
430 // CHECK-P8-NEXT: entry:
431 // CHECK-P8-NEXT: [[__VEC_ADDR_I:%.*]] = alloca <1 x i128>, align 16
432 // CHECK-P8-NEXT: [[__OFFSET_ADDR_I1:%.*]] = alloca i64, align 8
433 // CHECK-P8-NEXT: [[__PTR_ADDR_I2:%.*]] = alloca ptr, align 8
434 // CHECK-P8-NEXT: [[__ADDR_I3:%.*]] = alloca ptr, align 8
435 // CHECK-P8-NEXT: [[__OFFSET_ADDR_I:%.*]] = alloca i64, align 8
436 // CHECK-P8-NEXT: [[__PTR_ADDR_I:%.*]] = alloca ptr, align 8
437 // CHECK-P8-NEXT: [[__ADDR_I:%.*]] = alloca ptr, align 8
438 // CHECK-P8-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
439 // CHECK-P8-NEXT: [[ST_ADDR:%.*]] = alloca ptr, align 8
440 // CHECK-P8-NEXT: [[LD_ADDR:%.*]] = alloca ptr, align 8
441 // CHECK-P8-NEXT: store ptr [[C:%.*]], ptr [[C_ADDR]], align 8
442 // CHECK-P8-NEXT: store ptr [[ST:%.*]], ptr [[ST_ADDR]], align 8
443 // CHECK-P8-NEXT: store ptr [[LD:%.*]], ptr [[LD_ADDR]], align 8
444 // CHECK-P8-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LD_ADDR]], align 8
445 // CHECK-P8-NEXT: store i64 3, ptr [[__OFFSET_ADDR_I]], align 8
446 // CHECK-P8-NEXT: store ptr [[TMP0]], ptr [[__PTR_ADDR_I]], align 8
447 // CHECK-P8-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__PTR_ADDR_I]], align 8
448 // CHECK-P8-NEXT: [[TMP3:%.*]] = load i64, ptr [[__OFFSET_ADDR_I]], align 8
449 // CHECK-P8-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP3]]
450 // CHECK-P8-NEXT: store ptr [[ADD_PTR_I]], ptr [[__ADDR_I]], align 8
451 // CHECK-P8-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__ADDR_I]], align 8
452 // CHECK-P8-NEXT: [[TMP6:%.*]] = load <1 x i128>, ptr [[TMP4]], align 1
453 // CHECK-P8-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
454 // CHECK-P8-NEXT: store <1 x i128> [[TMP6]], ptr [[TMP7]], align 16
455 // CHECK-P8-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
456 // CHECK-P8-NEXT: [[TMP9:%.*]] = load <1 x i128>, ptr [[TMP8]], align 16
457 // CHECK-P8-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ST_ADDR]], align 8
458 // CHECK-P8-NEXT: store <1 x i128> [[TMP9]], ptr [[__VEC_ADDR_I]], align 16
459 // CHECK-P8-NEXT: store i64 7, ptr [[__OFFSET_ADDR_I1]], align 8
460 // CHECK-P8-NEXT: store ptr [[TMP10]], ptr [[__PTR_ADDR_I2]], align 8
461 // CHECK-P8-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__PTR_ADDR_I2]], align 8
462 // CHECK-P8-NEXT: [[TMP13:%.*]] = load i64, ptr [[__OFFSET_ADDR_I1]], align 8
463 // CHECK-P8-NEXT: [[ADD_PTR_I4:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP13]]
464 // CHECK-P8-NEXT: store ptr [[ADD_PTR_I4]], ptr [[__ADDR_I3]], align 8
465 // CHECK-P8-NEXT: [[TMP14:%.*]] = load <1 x i128>, ptr [[__VEC_ADDR_I]], align 16
466 // CHECK-P8-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__ADDR_I3]], align 8
467 // CHECK-P8-NEXT: store <1 x i128> [[TMP14]], ptr [[TMP15]], align 1
468 // CHECK-P8-NEXT: ret void
470 void test10(vector unsigned __int128 *c, unsigned __int128 *st,
471 const unsigned __int128 *ld) {
472 *c = vec_xl(3ll, ld);
473 vec_xst(*c, 7ll, st);
475 #endif