// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -triple armv8.3a-arm-none-eabi -target-cpu generic \
// RUN: -target-feature +fullfp16 -mfloat-abi soft -emit-llvm -o - %s | \
// RUN: opt -S -passes=sroa -o - | FileCheck %s

#include <arm_neon.h>
8 void foo16x4_rot90(float16x4_t a
, float16x4_t b
)
10 // CHECK: call <4 x half> @llvm.arm.neon.vcadd.rot90.v4f16
11 float16x4_t result
= vcadd_rot90_f16(a
, b
);
14 void foo32x2_rot90(float32x2_t a
, float32x2_t b
)
16 // CHECK: call <2 x float> @llvm.arm.neon.vcadd.rot90.v2f32
17 float32x2_t result
= vcadd_rot90_f32(a
, b
);
20 void foo16x8_rot90(float16x8_t a
, float16x8_t b
)
22 // CHECK: call <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16
23 float16x8_t result
= vcaddq_rot90_f16(a
, b
);
26 void foo32x4_rot90(float32x4_t a
, float32x4_t b
)
28 // CHECK: call <4 x float> @llvm.arm.neon.vcadd.rot90.v4f32
29 float32x4_t result
= vcaddq_rot90_f32(a
, b
);
32 void foo16x4_rot270(float16x4_t a
, float16x4_t b
)
34 // CHECK: call <4 x half> @llvm.arm.neon.vcadd.rot270.v4f16
35 float16x4_t result
= vcadd_rot270_f16(a
, b
);
38 void foo32x2_rot270(float32x2_t a
, float32x2_t b
)
40 // CHECK: call <2 x float> @llvm.arm.neon.vcadd.rot270.v2f32
41 float32x2_t result
= vcadd_rot270_f32(a
, b
);
44 void foo16x8_rot270(float16x8_t a
, float16x8_t b
)
46 // CHECK: call <8 x half> @llvm.arm.neon.vcadd.rot270.v8f16
47 float16x8_t result
= vcaddq_rot270_f16(a
, b
);
50 void foo32x4_rot270(float32x4_t a
, float32x4_t b
)
52 // CHECK: call <4 x float> @llvm.arm.neon.vcadd.rot270.v4f32
53 float32x4_t result
= vcaddq_rot270_f32(a
, b
);