// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
// RUN:   -target-feature +v8.3a -target-feature +fullfp16 -emit-llvm -o - %s \
// RUN:   | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>
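
// The vcadd[q]_rot90/_rot270 intrinsics perform a complex addition with the
// second operand rotated by 90 or 270 degrees; they are provided by the
// Armv8.3-A complex number extension (hence +v8.3a above, and +fullfp16 for
// the float16 variants). Each function below checks that the intrinsic
// lowers to the matching @llvm.aarch64.neon.vcadd.* IR intrinsic.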

void foo16x4_rot90(float16x4_t a, float16x4_t b)
{
// CHECK: call <4 x half> @llvm.aarch64.neon.vcadd.rot90.v4f16
  float16x4_t result = vcadd_rot90_f16(a, b);
}

void foo32x2_rot90(float32x2_t a, float32x2_t b)
{
// CHECK: call <2 x float> @llvm.aarch64.neon.vcadd.rot90.v2f32
  float32x2_t result = vcadd_rot90_f32(a, b);
}

void foo16x8_rot90(float16x8_t a, float16x8_t b)
{
// CHECK: call <8 x half> @llvm.aarch64.neon.vcadd.rot90.v8f16
  float16x8_t result = vcaddq_rot90_f16(a, b);
}

void foo32x4_rot90(float32x4_t a, float32x4_t b)
{
// CHECK: call <4 x float> @llvm.aarch64.neon.vcadd.rot90.v4f32
  float32x4_t result = vcaddq_rot90_f32(a, b);
}

void foo64x2_rot90(float64x2_t a, float64x2_t b)
{
// CHECK: call <2 x double> @llvm.aarch64.neon.vcadd.rot90.v2f64
  float64x2_t result = vcaddq_rot90_f64(a, b);
}
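
// The *_rot270 variants below exercise the same lowering with the second
// operand rotated by 270 degrees instead of 90.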

void foo16x4_rot270(float16x4_t a, float16x4_t b)
{
// CHECK: call <4 x half> @llvm.aarch64.neon.vcadd.rot270.v4f16
  float16x4_t result = vcadd_rot270_f16(a, b);
}

void foo32x2_rot270(float32x2_t a, float32x2_t b)
{
// CHECK: call <2 x float> @llvm.aarch64.neon.vcadd.rot270.v2f32
  float32x2_t result = vcadd_rot270_f32(a, b);
}

void foo16x8_rot270(float16x8_t a, float16x8_t b)
{
// CHECK: call <8 x half> @llvm.aarch64.neon.vcadd.rot270.v8f16
  float16x8_t result = vcaddq_rot270_f16(a, b);
}

void foo32x4_rot270(float32x4_t a, float32x4_t b)
{
// CHECK: call <4 x float> @llvm.aarch64.neon.vcadd.rot270.v4f32
  float32x4_t result = vcaddq_rot270_f32(a, b);
}

void foo64x2_rot270(float64x2_t a, float64x2_t b)
{
// CHECK: call <2 x double> @llvm.aarch64.neon.vcadd.rot270.v2f64
  float64x2_t result = vcaddq_rot270_f64(a, b);
}