! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}
! CHECK-LABEL: vec_add_testf32
subroutine vec_add_testf32(x, y)
  vector(real(4)) :: vsum, x, y
  ! The fadd below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fadd contract <4 x float> %[[x]], %[[y]]
end subroutine vec_add_testf32
! CHECK-LABEL: vec_add_testf64
subroutine vec_add_testf64(x, y)
  vector(real(8)) :: vsum, x, y
  ! The fadd below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fadd contract <2 x double> %[[x]], %[[y]]
end subroutine vec_add_testf64
! CHECK-LABEL: vec_add_testi8
subroutine vec_add_testi8(x, y)
  vector(integer(1)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <16 x i8> %[[x]], %[[y]]
end subroutine vec_add_testi8
! CHECK-LABEL: vec_add_testi16
subroutine vec_add_testi16(x, y)
  vector(integer(2)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <8 x i16> %[[x]], %[[y]]
end subroutine vec_add_testi16
! CHECK-LABEL: vec_add_testi32
subroutine vec_add_testi32(x, y)
  vector(integer(4)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <4 x i32> %[[x]], %[[y]]
end subroutine vec_add_testi32
! CHECK-LABEL: vec_add_testi64
subroutine vec_add_testi64(x, y)
  vector(integer(8)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <2 x i64> %[[x]], %[[y]]
end subroutine vec_add_testi64
! CHECK-LABEL: vec_add_testui8
subroutine vec_add_testui8(x, y)
  vector(unsigned(1)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <16 x i8> %[[x]], %[[y]]
end subroutine vec_add_testui8
! CHECK-LABEL: vec_add_testui16
subroutine vec_add_testui16(x, y)
  vector(unsigned(2)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <8 x i16> %[[x]], %[[y]]
end subroutine vec_add_testui16
! CHECK-LABEL: vec_add_testui32
subroutine vec_add_testui32(x, y)
  vector(unsigned(4)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <4 x i32> %[[x]], %[[y]]
end subroutine vec_add_testui32
! CHECK-LABEL: vec_add_testui64
subroutine vec_add_testui64(x, y)
  vector(unsigned(8)) :: vsum, x, y
  ! The add below is what the LLVMIR checks verify.
  vsum = vec_add(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = add <2 x i64> %[[x]], %[[y]]
end subroutine vec_add_testui64
! CHECK-LABEL: vec_mul_testf32
subroutine vec_mul_testf32(x, y)
  vector(real(4)) :: vmul, x, y
  ! The fmul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fmul contract <4 x float> %[[x]], %[[y]]
end subroutine vec_mul_testf32
! CHECK-LABEL: vec_mul_testf64
subroutine vec_mul_testf64(x, y)
  vector(real(8)) :: vmul, x, y
  ! The fmul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fmul contract <2 x double> %[[x]], %[[y]]
end subroutine vec_mul_testf64
! CHECK-LABEL: vec_mul_testi8
subroutine vec_mul_testi8(x, y)
  vector(integer(1)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <16 x i8> %[[x]], %[[y]]
end subroutine vec_mul_testi8
! CHECK-LABEL: vec_mul_testi16
subroutine vec_mul_testi16(x, y)
  vector(integer(2)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <8 x i16> %[[x]], %[[y]]
end subroutine vec_mul_testi16
! CHECK-LABEL: vec_mul_testi32
subroutine vec_mul_testi32(x, y)
  vector(integer(4)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <4 x i32> %[[x]], %[[y]]
end subroutine vec_mul_testi32
! CHECK-LABEL: vec_mul_testi64
subroutine vec_mul_testi64(x, y)
  vector(integer(8)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <2 x i64> %[[x]], %[[y]]
end subroutine vec_mul_testi64
! CHECK-LABEL: vec_mul_testui8
subroutine vec_mul_testui8(x, y)
  vector(unsigned(1)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <16 x i8> %[[x]], %[[y]]
end subroutine vec_mul_testui8
! CHECK-LABEL: vec_mul_testui16
subroutine vec_mul_testui16(x, y)
  vector(unsigned(2)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <8 x i16> %[[x]], %[[y]]
end subroutine vec_mul_testui16
! CHECK-LABEL: vec_mul_testui32
subroutine vec_mul_testui32(x, y)
  vector(unsigned(4)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <4 x i32> %[[x]], %[[y]]
end subroutine vec_mul_testui32
! CHECK-LABEL: vec_mul_testui64
subroutine vec_mul_testui64(x, y)
  vector(unsigned(8)) :: vmul, x, y
  ! The mul below is what the LLVMIR checks verify.
  vmul = vec_mul(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = mul <2 x i64> %[[x]], %[[y]]
end subroutine vec_mul_testui64
! CHECK-LABEL: vec_sub_testf32
subroutine vec_sub_testf32(x, y)
  vector(real(4)) :: vsub, x, y
  ! The fsub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fsub contract <4 x float> %[[x]], %[[y]]
end subroutine vec_sub_testf32
! CHECK-LABEL: vec_sub_testf64
subroutine vec_sub_testf64(x, y)
  vector(real(8)) :: vsub, x, y
  ! The fsub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = fsub contract <2 x double> %[[x]], %[[y]]
end subroutine vec_sub_testf64
! CHECK-LABEL: vec_sub_testi8
subroutine vec_sub_testi8(x, y)
  vector(integer(1)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <16 x i8> %[[x]], %[[y]]
end subroutine vec_sub_testi8
! CHECK-LABEL: vec_sub_testi16
subroutine vec_sub_testi16(x, y)
  vector(integer(2)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <8 x i16> %[[x]], %[[y]]
end subroutine vec_sub_testi16
! CHECK-LABEL: vec_sub_testi32
subroutine vec_sub_testi32(x, y)
  vector(integer(4)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <4 x i32> %[[x]], %[[y]]
end subroutine vec_sub_testi32
! CHECK-LABEL: vec_sub_testi64
subroutine vec_sub_testi64(x, y)
  vector(integer(8)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <2 x i64> %[[x]], %[[y]]
end subroutine vec_sub_testi64
! CHECK-LABEL: vec_sub_testui8
subroutine vec_sub_testui8(x, y)
  vector(unsigned(1)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <16 x i8> %[[x]], %[[y]]
end subroutine vec_sub_testui8
! CHECK-LABEL: vec_sub_testui16
subroutine vec_sub_testui16(x, y)
  vector(unsigned(2)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <8 x i16> %[[x]], %[[y]]
end subroutine vec_sub_testui16
! CHECK-LABEL: vec_sub_testui32
subroutine vec_sub_testui32(x, y)
  vector(unsigned(4)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <4 x i32> %[[x]], %[[y]]
end subroutine vec_sub_testui32
! CHECK-LABEL: vec_sub_testui64
subroutine vec_sub_testui64(x, y)
  vector(unsigned(8)) :: vsub, x, y
  ! The sub below is what the LLVMIR checks verify.
  vsub = vec_sub(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %{{[0-9]}} = sub <2 x i64> %[[x]], %[[y]]
end subroutine vec_sub_testui64
!----------------------
! vec_and
!----------------------
! CHECK-LABEL: vec_and_test_i8
subroutine vec_and_test_i8(arg1, arg2)
  vector(integer(1)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <16 x i8> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_i8
! CHECK-LABEL: vec_and_test_i16
subroutine vec_and_test_i16(arg1, arg2)
  vector(integer(2)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <8 x i16> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_i16
! CHECK-LABEL: vec_and_test_i32
subroutine vec_and_test_i32(arg1, arg2)
  vector(integer(4)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <4 x i32> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_i32
! CHECK-LABEL: vec_and_test_i64
subroutine vec_and_test_i64(arg1, arg2)
  vector(integer(8)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <2 x i64> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_i64
! CHECK-LABEL: vec_and_test_u8
subroutine vec_and_test_u8(arg1, arg2)
  vector(unsigned(1)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <16 x i8> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_u8
! CHECK-LABEL: vec_and_test_u16
subroutine vec_and_test_u16(arg1, arg2)
  vector(unsigned(2)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <8 x i16> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_u16
! CHECK-LABEL: vec_and_test_u32
subroutine vec_and_test_u32(arg1, arg2)
  vector(unsigned(4)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <4 x i32> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_u32
! CHECK-LABEL: vec_and_test_u64
subroutine vec_and_test_u64(arg1, arg2)
  vector(unsigned(8)) :: r, arg1, arg2
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = and <2 x i64> %[[arg1]], %[[arg2]]
end subroutine vec_and_test_u64
! CHECK-LABEL: vec_and_testf32
subroutine vec_and_testf32(arg1, arg2)
  vector(real(4)) :: r, arg1, arg2
  ! Real vectors are bitcast to integer vectors for the bitwise and.
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[bc1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: %[[bc2:.*]] = bitcast <4 x float> %[[arg2]] to <4 x i32>
! LLVMIR: %[[r:.*]] = and <4 x i32> %[[bc1]], %[[bc2]]
! LLVMIR: %{{[0-9]+}} = bitcast <4 x i32> %[[r]] to <4 x float>
end subroutine vec_and_testf32
! CHECK-LABEL: vec_and_testf64
subroutine vec_and_testf64(arg1, arg2)
  vector(real(8)) :: r, arg1, arg2
  ! Real vectors are bitcast to integer vectors for the bitwise and.
  r = vec_and(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[bc1:.*]] = bitcast <2 x double> %[[arg1]] to <2 x i64>
! LLVMIR: %[[bc2:.*]] = bitcast <2 x double> %[[arg2]] to <2 x i64>
! LLVMIR: %[[r:.*]] = and <2 x i64> %[[bc1]], %[[bc2]]
! LLVMIR: %{{[0-9]+}} = bitcast <2 x i64> %[[r]] to <2 x double>
end subroutine vec_and_testf64
!----------------------
! vec_xor
!----------------------
! CHECK-LABEL: vec_xor_test_i8
subroutine vec_xor_test_i8(arg1, arg2)
  vector(integer(1)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_i8
! CHECK-LABEL: vec_xor_test_i16
subroutine vec_xor_test_i16(arg1, arg2)
  vector(integer(2)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_i16
! CHECK-LABEL: vec_xor_test_i32
subroutine vec_xor_test_i32(arg1, arg2)
  vector(integer(4)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_i32
! CHECK-LABEL: vec_xor_test_i64
subroutine vec_xor_test_i64(arg1, arg2)
  vector(integer(8)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_i64
! CHECK-LABEL: vec_xor_test_u8
subroutine vec_xor_test_u8(arg1, arg2)
  vector(unsigned(1)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_u8
! CHECK-LABEL: vec_xor_test_u16
subroutine vec_xor_test_u16(arg1, arg2)
  vector(unsigned(2)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_u16
! CHECK-LABEL: vec_xor_test_u32
subroutine vec_xor_test_u32(arg1, arg2)
  vector(unsigned(4)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_u32
! CHECK-LABEL: vec_xor_test_u64
subroutine vec_xor_test_u64(arg1, arg2)
  vector(unsigned(8)) :: r, arg1, arg2
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[arg1]], %[[arg2]]
end subroutine vec_xor_test_u64
! CHECK-LABEL: vec_xor_testf32
subroutine vec_xor_testf32(arg1, arg2)
  vector(real(4)) :: r, arg1, arg2
  ! Real vectors are bitcast to integer vectors for the bitwise xor.
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[bc1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: %[[bc2:.*]] = bitcast <4 x float> %[[arg2]] to <4 x i32>
! LLVMIR: %[[r:.*]] = xor <4 x i32> %[[bc1]], %[[bc2]]
! LLVMIR: %{{[0-9]+}} = bitcast <4 x i32> %[[r]] to <4 x float>
end subroutine vec_xor_testf32
! CHECK-LABEL: vec_xor_testf64
subroutine vec_xor_testf64(arg1, arg2)
  vector(real(8)) :: r, arg1, arg2
  ! Real vectors are bitcast to integer vectors for the bitwise xor.
  r = vec_xor(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[bc1:.*]] = bitcast <2 x double> %[[arg1]] to <2 x i64>
! LLVMIR: %[[bc2:.*]] = bitcast <2 x double> %[[arg2]] to <2 x i64>
! LLVMIR: %[[r:.*]] = xor <2 x i64> %[[bc1]], %[[bc2]]
! LLVMIR: %{{[0-9]+}} = bitcast <2 x i64> %[[r]] to <2 x double>
end subroutine vec_xor_testf64