[LLVM][IR] Use splat syntax when printing ConstantExpr based splats. (#116856)
[llvm-project.git] / clang / test / CodeGen / arm-mve-intrinsics / cplusplus.cpp
blob29719614d04fbce321d4a2477420b9a5bf5e2b4e
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @_Z16test_vbicq_n_s1617__simd128_int16_t(
10 // CHECK-NEXT: entry:
11 // CHECK-NEXT: [[TMP0:%.*]] = and <8 x i16> [[A:%.*]], splat (i16 11007)
12 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
14 int16x8_t test_vbicq_n_s16(int16x8_t a)
16 #ifdef POLYMORPHIC
17 return vbicq(a, 0xd500);
18 #else /* POLYMORPHIC */
19 return vbicq_n_s16(a, 0xd500);
20 #endif /* POLYMORPHIC */
23 // CHECK-LABEL: @_Z16test_vbicq_n_u3218__simd128_uint32_t(
24 // CHECK-NEXT: entry:
25 // CHECK-NEXT: [[TMP0:%.*]] = and <4 x i32> [[A:%.*]], splat (i32 -8193)
26 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
28 uint32x4_t test_vbicq_n_u32(uint32x4_t a)
30 #ifdef POLYMORPHIC
31 return vbicq(a, 0x2000);
32 #else /* POLYMORPHIC */
33 return vbicq_n_u32(a, 0x2000);
34 #endif /* POLYMORPHIC */
37 // CHECK-LABEL: @_Z16test_vorrq_n_s3217__simd128_int32_t(
38 // CHECK-NEXT: entry:
39 // CHECK-NEXT: [[TMP0:%.*]] = or <4 x i32> [[A:%.*]], splat (i32 65536)
40 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
42 int32x4_t test_vorrq_n_s32(int32x4_t a)
44 #ifdef POLYMORPHIC
45 return vorrq(a, 0x10000);
46 #else /* POLYMORPHIC */
47 return vorrq_n_s32(a, 0x10000);
48 #endif /* POLYMORPHIC */
51 // CHECK-LABEL: @_Z16test_vorrq_n_u1618__simd128_uint16_t(
52 // CHECK-NEXT: entry:
53 // CHECK-NEXT: [[TMP0:%.*]] = or <8 x i16> [[A:%.*]], splat (i16 -4096)
54 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
56 uint16x8_t test_vorrq_n_u16(uint16x8_t a)
58 #ifdef POLYMORPHIC
59 return vorrq(a, 0xf000);
60 #else /* POLYMORPHIC */
61 return vorrq_n_u16(a, 0xf000);
62 #endif /* POLYMORPHIC */
65 // CHECK-LABEL: @_Z16test_vcmpeqq_f1619__simd128_float16_tS_(
66 // CHECK-NEXT: entry:
67 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
68 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
69 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
70 // CHECK-NEXT: ret i16 [[TMP2]]
72 mve_pred16_t test_vcmpeqq_f16(float16x8_t a, float16x8_t b)
74 #ifdef POLYMORPHIC
75 return vcmpeqq(a, b);
76 #else /* POLYMORPHIC */
77 return vcmpeqq_f16(a, b);
78 #endif /* POLYMORPHIC */
81 // CHECK-LABEL: @_Z18test_vcmpeqq_n_f1619__simd128_float16_tDh(
82 // CHECK-NEXT: entry:
83 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
84 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
85 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[DOTSPLAT]]
86 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
87 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
88 // CHECK-NEXT: ret i16 [[TMP2]]
90 mve_pred16_t test_vcmpeqq_n_f16(float16x8_t a, float16_t b)
92 #ifdef POLYMORPHIC
93 return vcmpeqq(a, b);
94 #else /* POLYMORPHIC */
95 return vcmpeqq_n_f16(a, b);
96 #endif /* POLYMORPHIC */
99 // CHECK-LABEL: @_Z14test_vld1q_u16PKt(
100 // CHECK-NEXT: entry:
101 // CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
102 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
104 uint16x8_t test_vld1q_u16(const uint16_t *base)
106 #ifdef POLYMORPHIC
107 return vld1q(base);
108 #else /* POLYMORPHIC */
109 return vld1q_u16(base);
110 #endif /* POLYMORPHIC */
113 // CHECK-LABEL: @_Z16test_vst1q_p_s32Pi17__simd128_int32_tt(
114 // CHECK-NEXT: entry:
115 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
116 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
117 // CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
118 // CHECK-NEXT: ret void
120 void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
122 #ifdef POLYMORPHIC
123 vst1q_p(base, value, p);
124 #else /* POLYMORPHIC */
125 vst1q_p_s32(base, value, p);
126 #endif /* POLYMORPHIC */
129 // CHECK-LABEL: @_Z30test_vldrdq_gather_base_wb_s64P18__simd128_uint64_t(
130 // CHECK-NEXT: entry:
131 // CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[ADDR:%.*]], align 8
132 // CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64> [[TMP0]], i32 576)
133 // CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <2 x i64>, <2 x i64> } [[TMP1]], 1
134 // CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[ADDR]], align 8
135 // CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i64>, <2 x i64> } [[TMP1]], 0
136 // CHECK-NEXT: ret <2 x i64> [[TMP3]]
138 int64x2_t test_vldrdq_gather_base_wb_s64(uint64x2_t *addr)
140 return vldrdq_gather_base_wb_s64(addr, 0x240);
143 // CHECK-LABEL: @_Z31test_vstrwq_scatter_base_wb_u32P18__simd128_uint32_tS_(
144 // CHECK-NEXT: entry:
145 // CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[ADDR:%.*]], align 8
146 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4i32(<4 x i32> [[TMP0]], i32 64, <4 x i32> [[VALUE:%.*]])
147 // CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[ADDR]], align 8
148 // CHECK-NEXT: ret void
150 void test_vstrwq_scatter_base_wb_u32(uint32x4_t *addr, uint32x4_t value)
152 #ifdef POLYMORPHIC
153 vstrwq_scatter_base_wb(addr, 0x40, value);
154 #else /* POLYMORPHIC */
155 vstrwq_scatter_base_wb_u32(addr, 0x40, value);
156 #endif /* POLYMORPHIC */