// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +zve64d \
// RUN: -target-feature +f -target-feature +d -disable-O0-optnone \
// RUN: -mvscale-min=4 -mvscale-max=4 -emit-llvm -o - %s | \
// RUN: opt -S -passes=sroa | FileCheck %s

// REQUIRES: riscv-registered-target

#include <stdint.h>
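
// This test applies the C shift operators to fixed-length RVV vector types.
// With +zve64d and -mvscale-min=4 -mvscale-max=4 the compiler treats vscale as
// exactly 4, so an LMUL=1 value is 4 * 64 = 256 bits; that is why the checks
// below operate on <32 x i8>, <16 x i16>, <8 x i32> and <4 x i64> fixed
// vectors (the element counts are inferred from the RUN line rather than
// spelled out anywhere else in the test).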
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef vint8m1_t fixed_int8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint16m1_t fixed_int16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

typedef vuint8m1_t fixed_uint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint16m1_t fixed_uint16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint32m1_t fixed_uint32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint64m1_t fixed_uint64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

typedef vfloat32m1_t fixed_float32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
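
// The riscv_rvv_vector_bits(__riscv_v_fixed_vlen) attribute gives the sizeless
// m1 types above a fixed size, which is what makes operators such as << and >>
// usable on them below. As a rough sanity sketch (assuming the 256-bit
// __riscv_v_fixed_vlen implied by the RUN line, i.e. 32-byte vectors; this is
// illustrative and not part of the checked output), the fixed types have a
// compile-time size:
_Static_assert(sizeof(fixed_int8m1_t) == __riscv_v_fixed_vlen / 8,
               "fixed-length RVV types have a compile-time size");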

// CHECK-LABEL: @lshift_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a << b;
}
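
// In all of the autogenerated checks, the fixed-length arguments arrive as
// scalable vectors ([[A_COERCE]]/[[B_COERCE]]) and the result is returned the
// same way; the llvm.vector.extract / llvm.vector.insert calls are the
// conversions between the two representations, so only the shift instruction
// in the middle varies from function to function.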

// CHECK-LABEL: @rshift_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a >> b;
}
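
// The *_rsplat/*_lsplat variants below shift a vector by a scalar and a
// splatted scalar by a vector, respectively. For the i8 and i16 element types
// the scalar operand is first sign- or zero-extended to i32, splatted, and
// then truncated back to the element width (the [[SH_PROM]] trunc in the
// checks); this appears to reflect the usual C integer promotion of the
// scalar before the vector-scalar shift.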

// CHECK-LABEL: @lshift_i8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_i16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> poison, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_i32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_i64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) {
  return b >> a;
}