// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
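// This file checks code generation for the MVE vqdmulltq intrinsic family
// (vqdmulltq, vqdmulltq_m, vqdmulltq_n, vqdmulltq_m_n), in both the
// polymorphic and the explicitly suffixed forms.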
// CHECK-LABEL: @test_vqdmulltq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmull.v4i32.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vqdmulltq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vqdmulltq(a, b);
#else /* POLYMORPHIC */
  return vqdmulltq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.arm.mve.vqdmull.v2i64.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
//
int64x2_t test_vqdmulltq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vqdmulltq(a, b);
#else /* POLYMORPHIC */
  return vqdmulltq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmull.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqdmulltq_m_s16(int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vqdmulltq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
  return vqdmulltq_m_s16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.arm.mve.vqdmull.predicated.v2i64.v4i32.v2i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, <2 x i1> [[TMP1]], <2 x i64> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP2]]
//
int64x2_t test_vqdmulltq_m_s32(int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vqdmulltq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
  return vqdmulltq_m_s32(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmull.v4i32.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[DOTSPLAT]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vqdmulltq_n_s16(int16x8_t a, int16_t b) {
#ifdef POLYMORPHIC
  return vqdmulltq(a, b);
#else /* POLYMORPHIC */
  return vqdmulltq_n_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.arm.mve.vqdmull.v2i64.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[DOTSPLAT]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
//
int64x2_t test_vqdmulltq_n_s32(int32x4_t a, int32_t b) {
#ifdef POLYMORPHIC
  return vqdmulltq(a, b);
#else /* POLYMORPHIC */
  return vqdmulltq_n_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmull.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], <8 x i16> [[DOTSPLAT]], i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqdmulltq_m_n_s16(int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vqdmulltq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
  return vqdmulltq_m_n_s16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqdmulltq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.arm.mve.vqdmull.predicated.v2i64.v4i32.v2i1(<4 x i32> [[A:%.*]], <4 x i32> [[DOTSPLAT]], i32 1, <2 x i1> [[TMP1]], <2 x i64> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP2]]
//
int64x2_t test_vqdmulltq_m_n_s32(int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vqdmulltq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
  return vqdmulltq_m_n_s32(inactive, a, b, p);
#endif /* POLYMORPHIC */
}