// clang/test/CodeGen/PowerPC/ppc-mma-types.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr10 \
// RUN:   -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr9 \
// RUN:   -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr8 \
// RUN:   -emit-llvm -o - %s | FileCheck %s
9 // CHECK-LABEL: @test1(
10 // CHECK-NEXT: entry:
11 // CHECK-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
12 // CHECK-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
13 // CHECK-NEXT: store ptr [[PTR1:%.*]], ptr [[PTR1_ADDR]], align 8
14 // CHECK-NEXT: store ptr [[PTR2:%.*]], ptr [[PTR2_ADDR]], align 8
15 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8
16 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP0]], i64 2
17 // CHECK-NEXT: [[TMP1:%.*]] = load <512 x i1>, ptr [[ADD_PTR]], align 64
18 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
19 // CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 1
20 // CHECK-NEXT: store <512 x i1> [[TMP1]], ptr [[ADD_PTR1]], align 64
21 // CHECK-NEXT: ret void
23 void test1(__vector_quad *ptr1, __vector_quad *ptr2) {
24 *(ptr2 + 1) = *(ptr1 + 2);
27 // CHECK-LABEL: @test2(
28 // CHECK-NEXT: entry:
29 // CHECK-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
30 // CHECK-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
31 // CHECK-NEXT: store ptr [[PTR1:%.*]], ptr [[PTR1_ADDR]], align 8
32 // CHECK-NEXT: store ptr [[PTR2:%.*]], ptr [[PTR2_ADDR]], align 8
33 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8
34 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP0]], i64 2
35 // CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[ADD_PTR]], align 32
36 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
37 // CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 1
38 // CHECK-NEXT: store <256 x i1> [[TMP1]], ptr [[ADD_PTR1]], align 32
39 // CHECK-NEXT: ret void
41 void test2(__vector_pair *ptr1, __vector_pair *ptr2) {
42 *(ptr2 + 1) = *(ptr1 + 2);
45 typedef __vector_quad vq_t;
46 // CHECK-LABEL: @testVQTypedef(
47 // CHECK-NEXT: entry:
48 // CHECK-NEXT: [[INP_ADDR:%.*]] = alloca ptr, align 8
49 // CHECK-NEXT: [[OUTP_ADDR:%.*]] = alloca ptr, align 8
50 // CHECK-NEXT: [[VQIN:%.*]] = alloca ptr, align 8
51 // CHECK-NEXT: [[VQOUT:%.*]] = alloca ptr, align 8
52 // CHECK-NEXT: store ptr [[INP:%.*]], ptr [[INP_ADDR]], align 8
53 // CHECK-NEXT: store ptr [[OUTP:%.*]], ptr [[OUTP_ADDR]], align 8
54 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[INP_ADDR]], align 8
55 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQIN]], align 8
56 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUTP_ADDR]], align 8
57 // CHECK-NEXT: store ptr [[TMP2]], ptr [[VQOUT]], align 8
58 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQIN]], align 8
59 // CHECK-NEXT: [[TMP5:%.*]] = load <512 x i1>, ptr [[TMP4]], align 64
60 // CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[VQOUT]], align 8
61 // CHECK-NEXT: store <512 x i1> [[TMP5]], ptr [[TMP6]], align 64
62 // CHECK-NEXT: ret void
64 void testVQTypedef(int *inp, int *outp) {
65 vq_t *vqin = (vq_t *)inp;
66 vq_t *vqout = (vq_t *)outp;
67 *vqout = *vqin;
70 // CHECK-LABEL: @testVQArg3(
71 // CHECK-NEXT: entry:
72 // CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
73 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
74 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
75 // CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
76 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
77 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
78 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
79 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
80 // CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
81 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
82 // CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
83 // CHECK-NEXT: ret void
85 void testVQArg3(__vector_quad *vq, int *ptr) {
86 __vector_quad *vqp = (__vector_quad *)ptr;
87 *vqp = *vq;
90 // CHECK-LABEL: @testVQArg4(
91 // CHECK-NEXT: entry:
92 // CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
93 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
94 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
95 // CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
96 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
97 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
98 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
99 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
100 // CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
101 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
102 // CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
103 // CHECK-NEXT: ret void
105 void testVQArg4(const __vector_quad *const vq, int *ptr) {
106 __vector_quad *vqp = (__vector_quad *)ptr;
107 *vqp = *vq;
110 // CHECK-LABEL: @testVQArg5(
111 // CHECK-NEXT: entry:
112 // CHECK-NEXT: [[VQA_ADDR:%.*]] = alloca ptr, align 8
113 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
114 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
115 // CHECK-NEXT: store ptr [[VQA:%.*]], ptr [[VQA_ADDR]], align 8
116 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
117 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
118 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
119 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQA_ADDR]], align 8
120 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 0
121 // CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[ARRAYIDX]], align 64
122 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
123 // CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
124 // CHECK-NEXT: ret void
126 void testVQArg5(__vector_quad vqa[], int *ptr) {
127 __vector_quad *vqp = (__vector_quad *)ptr;
128 *vqp = vqa[0];
131 // CHECK-LABEL: @testVQArg7(
132 // CHECK-NEXT: entry:
133 // CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
134 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
135 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
136 // CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
137 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
138 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
139 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
140 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
141 // CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
142 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
143 // CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
144 // CHECK-NEXT: ret void
146 void testVQArg7(const vq_t *vq, int *ptr) {
147 __vector_quad *vqp = (__vector_quad *)ptr;
148 *vqp = *vq;
151 // CHECK-LABEL: @testVQRet2(
152 // CHECK-NEXT: entry:
153 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
154 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
155 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
156 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
157 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
158 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
159 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
160 // CHECK-NEXT: ret ptr [[ADD_PTR]]
162 __vector_quad *testVQRet2(int *ptr) {
163 __vector_quad *vqp = (__vector_quad *)ptr;
164 return vqp + 2;
167 // CHECK-LABEL: @testVQRet3(
168 // CHECK-NEXT: entry:
169 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
170 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
171 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
172 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
173 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
174 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
175 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
176 // CHECK-NEXT: ret ptr [[ADD_PTR]]
178 const __vector_quad *testVQRet3(int *ptr) {
179 __vector_quad *vqp = (__vector_quad *)ptr;
180 return vqp + 2;
183 // CHECK-LABEL: @testVQRet5(
184 // CHECK-NEXT: entry:
185 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
186 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
187 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
188 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
189 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
190 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
191 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
192 // CHECK-NEXT: ret ptr [[ADD_PTR]]
194 const vq_t *testVQRet5(int *ptr) {
195 __vector_quad *vqp = (__vector_quad *)ptr;
196 return vqp + 2;
199 // CHECK-LABEL: @testVQSizeofAlignof(
200 // CHECK-NEXT: entry:
201 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
202 // CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
203 // CHECK-NEXT: [[VQ:%.*]] = alloca <512 x i1>, align 64
204 // CHECK-NEXT: [[SIZET:%.*]] = alloca i32, align 4
205 // CHECK-NEXT: [[ALIGNT:%.*]] = alloca i32, align 4
206 // CHECK-NEXT: [[SIZEV:%.*]] = alloca i32, align 4
207 // CHECK-NEXT: [[ALIGNV:%.*]] = alloca i32, align 4
208 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
209 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
210 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
211 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
212 // CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
213 // CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[VQ]], align 64
214 // CHECK-NEXT: store i32 64, ptr [[SIZET]], align 4
215 // CHECK-NEXT: store i32 64, ptr [[ALIGNT]], align 4
216 // CHECK-NEXT: store i32 64, ptr [[SIZEV]], align 4
217 // CHECK-NEXT: store i32 64, ptr [[ALIGNV]], align 4
218 // CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[SIZET]], align 4
219 // CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ALIGNT]], align 4
220 // CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP4]], [[TMP5]]
221 // CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SIZEV]], align 4
222 // CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[TMP6]]
223 // CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNV]], align 4
224 // CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD1]], [[TMP7]]
225 // CHECK-NEXT: ret i32 [[ADD2]]
227 int testVQSizeofAlignof(int *ptr) {
228 __vector_quad *vqp = (__vector_quad *)ptr;
229 __vector_quad vq = *vqp;
230 unsigned sizet = sizeof(__vector_quad);
231 unsigned alignt = __alignof__(__vector_quad);
232 unsigned sizev = sizeof(vq);
233 unsigned alignv = __alignof__(vq);
234 return sizet + alignt + sizev + alignv;
237 typedef __vector_pair vp_t;
238 // CHECK-LABEL: @testVPTypedef(
239 // CHECK-NEXT: entry:
240 // CHECK-NEXT: [[INP_ADDR:%.*]] = alloca ptr, align 8
241 // CHECK-NEXT: [[OUTP_ADDR:%.*]] = alloca ptr, align 8
242 // CHECK-NEXT: [[VPIN:%.*]] = alloca ptr, align 8
243 // CHECK-NEXT: [[VPOUT:%.*]] = alloca ptr, align 8
244 // CHECK-NEXT: store ptr [[INP:%.*]], ptr [[INP_ADDR]], align 8
245 // CHECK-NEXT: store ptr [[OUTP:%.*]], ptr [[OUTP_ADDR]], align 8
246 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[INP_ADDR]], align 8
247 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPIN]], align 8
248 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUTP_ADDR]], align 8
249 // CHECK-NEXT: store ptr [[TMP2]], ptr [[VPOUT]], align 8
250 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPIN]], align 8
251 // CHECK-NEXT: [[TMP5:%.*]] = load <256 x i1>, ptr [[TMP4]], align 32
252 // CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[VPOUT]], align 8
253 // CHECK-NEXT: store <256 x i1> [[TMP5]], ptr [[TMP6]], align 32
254 // CHECK-NEXT: ret void
256 void testVPTypedef(int *inp, int *outp) {
257 vp_t *vpin = (vp_t *)inp;
258 vp_t *vpout = (vp_t *)outp;
259 *vpout = *vpin;
262 // CHECK-LABEL: @testVPArg3(
263 // CHECK-NEXT: entry:
264 // CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
265 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
266 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
267 // CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
268 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
269 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
270 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
271 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
272 // CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
273 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
274 // CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
275 // CHECK-NEXT: ret void
277 void testVPArg3(__vector_pair *vp, int *ptr) {
278 __vector_pair *vpp = (__vector_pair *)ptr;
279 *vpp = *vp;
282 // CHECK-LABEL: @testVPArg4(
283 // CHECK-NEXT: entry:
284 // CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
285 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
286 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
287 // CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
288 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
289 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
290 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
291 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
292 // CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
293 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
294 // CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
295 // CHECK-NEXT: ret void
297 void testVPArg4(const __vector_pair *const vp, int *ptr) {
298 __vector_pair *vpp = (__vector_pair *)ptr;
299 *vpp = *vp;
302 // CHECK-LABEL: @testVPArg5(
303 // CHECK-NEXT: entry:
304 // CHECK-NEXT: [[VPA_ADDR:%.*]] = alloca ptr, align 8
305 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
306 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
307 // CHECK-NEXT: store ptr [[VPA:%.*]], ptr [[VPA_ADDR]], align 8
308 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
309 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
310 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
311 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPA_ADDR]], align 8
312 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 0
313 // CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[ARRAYIDX]], align 32
314 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
315 // CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
316 // CHECK-NEXT: ret void
318 void testVPArg5(__vector_pair vpa[], int *ptr) {
319 __vector_pair *vpp = (__vector_pair *)ptr;
320 *vpp = vpa[0];
323 // CHECK-LABEL: @testVPArg7(
324 // CHECK-NEXT: entry:
325 // CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
326 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
327 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
328 // CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
329 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
330 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
331 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
332 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
333 // CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
334 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
335 // CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
336 // CHECK-NEXT: ret void
338 void testVPArg7(const vp_t *vp, int *ptr) {
339 __vector_pair *vpp = (__vector_pair *)ptr;
340 *vpp = *vp;
343 // CHECK-LABEL: @testVPRet2(
344 // CHECK-NEXT: entry:
345 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
346 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
347 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
348 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
349 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
350 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
351 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
352 // CHECK-NEXT: ret ptr [[ADD_PTR]]
354 __vector_pair *testVPRet2(int *ptr) {
355 __vector_pair *vpp = (__vector_pair *)ptr;
356 return vpp + 2;
359 // CHECK-LABEL: @testVPRet3(
360 // CHECK-NEXT: entry:
361 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
362 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
363 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
364 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
365 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
366 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
367 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
368 // CHECK-NEXT: ret ptr [[ADD_PTR]]
370 const __vector_pair *testVPRet3(int *ptr) {
371 __vector_pair *vpp = (__vector_pair *)ptr;
372 return vpp + 2;
375 // CHECK-LABEL: @testVPRet5(
376 // CHECK-NEXT: entry:
377 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
378 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
379 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
380 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
381 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
382 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
383 // CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
384 // CHECK-NEXT: ret ptr [[ADD_PTR]]
386 const vp_t *testVPRet5(int *ptr) {
387 __vector_pair *vpp = (__vector_pair *)ptr;
388 return vpp + 2;
391 // CHECK-LABEL: @testVPSizeofAlignof(
392 // CHECK-NEXT: entry:
393 // CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
394 // CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
395 // CHECK-NEXT: [[VP:%.*]] = alloca <256 x i1>, align 32
396 // CHECK-NEXT: [[SIZET:%.*]] = alloca i32, align 4
397 // CHECK-NEXT: [[ALIGNT:%.*]] = alloca i32, align 4
398 // CHECK-NEXT: [[SIZEV:%.*]] = alloca i32, align 4
399 // CHECK-NEXT: [[ALIGNV:%.*]] = alloca i32, align 4
400 // CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
401 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
402 // CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
403 // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
404 // CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
405 // CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[VP]], align 32
406 // CHECK-NEXT: store i32 32, ptr [[SIZET]], align 4
407 // CHECK-NEXT: store i32 32, ptr [[ALIGNT]], align 4
408 // CHECK-NEXT: store i32 32, ptr [[SIZEV]], align 4
409 // CHECK-NEXT: store i32 32, ptr [[ALIGNV]], align 4
410 // CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[SIZET]], align 4
411 // CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ALIGNT]], align 4
412 // CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP4]], [[TMP5]]
413 // CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SIZEV]], align 4
414 // CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[TMP6]]
415 // CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNV]], align 4
416 // CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD1]], [[TMP7]]
417 // CHECK-NEXT: ret i32 [[ADD2]]
419 int testVPSizeofAlignof(int *ptr) {
420 __vector_pair *vpp = (__vector_pair *)ptr;
421 __vector_pair vp = *vpp;
422 unsigned sizet = sizeof(__vector_pair);
423 unsigned alignt = __alignof__(__vector_pair);
424 unsigned sizev = sizeof(vp);
425 unsigned alignv = __alignof__(vp);
426 return sizet + alignt + sizev + alignv;