// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=4 -mvscale-max=4 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

// REQUIRES: riscv-registered-target

#include <riscv_vector.h>
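
// With -mvscale-min=4 -mvscale-max=4 and zve64d (64-bit minimum vector block),
// VLEN is pinned to 4 * 64 = 256 bits, so __riscv_v_fixed_vlen expands to 256
// and, for example, fixed_int32m1_t below lowers to <8 x i32>.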
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef __rvv_int8m2_t vint8m2_t;
typedef __rvv_uint8m2_t vuint8m2_t;
typedef __rvv_int16m2_t vint16m2_t;
typedef __rvv_uint16m2_t vuint16m2_t;
typedef __rvv_int32m2_t vint32m2_t;
typedef __rvv_uint32m2_t vuint32m2_t;
typedef __rvv_int64m2_t vint64m2_t;
typedef __rvv_uint64m2_t vuint64m2_t;
typedef __rvv_float32m2_t vfloat32m2_t;
typedef __rvv_float64m2_t vfloat64m2_t;

typedef __rvv_bool1_t vbool1_t;
typedef __rvv_bool4_t vbool4_t;
typedef __rvv_bool32_t vbool32_t;

typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m2_t fixed_int32m2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 2)));
typedef vint16m4_t fixed_int16m4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 4)));
typedef vint8m8_t fixed_int8m8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 8)));
typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/4)));
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/32)));
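
// With __riscv_v_fixed_vlen == 256, the fixed mask types are stored as byte
// vectors: fixed_bool1_t as <32 x i8>, fixed_bool4_t as <8 x i8>, and
// fixed_bool32_t as <1 x i8>, as the CHECK lines below show.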
42 fixed_int32m1_t global_vec
;
43 fixed_int32m2_t global_vec_m2
;
44 fixed_int8m8_t global_vec_int8m8
;
45 fixed_int16m4_t global_vec_int16m4
;
46 fixed_bool1_t global_bool1
;
47 fixed_bool4_t global_bool4
;
48 fixed_bool32_t global_bool32
;
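
// test_bool1: the fixed-length global mask (fixed_bool1_t, <32 x i8> in memory)
// is widened to a scalable i8 vector, bitcast to <vscale x 64 x i1>, ANDed with
// the scalable mask argument, and used in a masked vadd on LMUL=8 i8 vectors.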
// CHECK-LABEL: @test_bool1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <256 x i8>, align 8
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca <vscale x 64 x i1>, align 1
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: [[MASK:%.*]] = alloca <vscale x 64 x i1>, align 1
// CHECK-NEXT: store <vscale x 64 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <vscale x 64 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 64 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @global_bool1, align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[TMP1]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0]], <vscale x 64 x i1> [[TMP2]], i64 256)
// CHECK-NEXT: store <vscale x 64 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 64 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 64 x i8>, ptr [[VEC_ADDR]], align 1
// CHECK-NEXT: [[TMP6:%.*]] = load <256 x i8>, ptr @global_vec_int8m8, align 8
// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> poison, <256 x i8> [[TMP6]], i64 0)
// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[TMP5]], <vscale x 64 x i8> [[CAST_SCALABLE1]], <vscale x 64 x i1> [[TMP4]], i64 256, i64 3)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <256 x i8> @llvm.vector.extract.v256i8.nxv64i8(<vscale x 64 x i8> [[TMP7]], i64 0)
// CHECK-NEXT: store <256 x i8> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load <256 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE2:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> poison, <256 x i8> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 64 x i8> [[CAST_SCALABLE2]]
fixed_int8m8_t test_bool1(vbool1_t m, vint8m8_t vec) {
  vbool1_t mask = __riscv_vmand(m, global_bool1, __riscv_v_fixed_vlen);
  return __riscv_vadd(mask, vec, global_vec_int8m8, __riscv_v_fixed_vlen);
}
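
// test_bool4: same pattern at LMUL=4 with i16 elements; the fixed_bool4_t
// global is stored as <8 x i8> and bitcast to <vscale x 16 x i1>.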
// CHECK-LABEL: @test_bool4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <64 x i16>, align 8
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 1
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT: [[MASK:%.*]] = alloca <vscale x 16 x i1>, align 1
// CHECK-NEXT: store <vscale x 16 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <vscale x 16 x i16> [[VEC:%.*]], ptr [[VEC_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @global_bool4, align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP1]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], i64 64)
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i16>, ptr [[VEC_ADDR]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = load <64 x i16>, ptr @global_vec_int16m4, align 8
// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> poison, <64 x i16> [[TMP6]], i64 0)
// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[TMP5]], <vscale x 16 x i16> [[CAST_SCALABLE1]], <vscale x 16 x i1> [[TMP4]], i64 64, i64 3)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <64 x i16> @llvm.vector.extract.v64i16.nxv16i16(<vscale x 16 x i16> [[TMP7]], i64 0)
// CHECK-NEXT: store <64 x i16> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load <64 x i16>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE2:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> poison, <64 x i16> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i16> [[CAST_SCALABLE2]]
fixed_int16m4_t test_bool4(vbool4_t m, vint16m4_t vec) {
  vbool4_t mask = __riscv_vmand(m, global_bool4, __riscv_v_fixed_vlen/4);
  return __riscv_vadd(mask, vec, global_vec_int16m4, __riscv_v_fixed_vlen/4);
}
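
// test_bool32: a fixed_bool32_t occupies a single byte, so it is converted to
// <vscale x 2 x i1> through a stack slot ([[SAVED_VALUE]]) rather than a
// vector.insert + bitcast.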
// CHECK-LABEL: @test_bool32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT: [[MASK:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <1 x i8>, align 1
// CHECK-NEXT: store <vscale x 2 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr @global_bool32, align 1
// CHECK-NEXT: store <1 x i8> [[TMP1]], ptr [[SAVED_VALUE]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[SAVED_VALUE]], align 1
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP2]], i64 8)
// CHECK-NEXT: store <vscale x 2 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 2 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @global_vec, align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP6]], i64 0)
// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i1> [[TMP4]], i64 8, i64 3)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP7]], i64 0)
// CHECK-NEXT: store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE1]]
fixed_int32m1_t test_bool32(vbool32_t m, vint32m1_t vec) {
  vbool32_t mask = __riscv_vmand(m, global_bool32, __riscv_v_fixed_vlen/32);
  return __riscv_vadd(mask, vec, global_vec, __riscv_v_fixed_vlen/32);
}
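
// Test loading a fixed-length vector through a pointer to a global.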
// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT: [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr @global_vec, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[TMP0]], align 8
// CHECK-NEXT: store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
fixed_int32m1_t test_ptr_to_global() {
  fixed_int32m1_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}

// Test casting pointer from fixed-length array to scalable vector.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <8 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 8
// CHECK-NEXT: store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
fixed_int32m1_t array_arg(fixed_int32m1_t arr[]) {
  return arr[0];
}
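
// Test indexing an array of fixed_bool1_t and returning the element; the
// <32 x i8> storage is converted back to <vscale x 64 x i1> on return.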
// CHECK-LABEL: @address_of_array_idx_bool1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <32 x i8>, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <32 x i8>], align 8
// CHECK-NEXT: [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <32 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr [[TMP0]], align 8
// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[TMP2]], i64 0)
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP3]]
fixed_bool1_t address_of_array_idx_bool1() {
  fixed_bool1_t arr[3];
  fixed_bool1_t *parr;
  parr = &arr[0];
  return *parr;
}
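
// Same as above for fixed_bool4_t (<8 x i8> storage, <vscale x 16 x i1> result).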
// CHECK-LABEL: @address_of_array_idx_bool4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i8>, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <8 x i8>], align 8
// CHECK-NEXT: [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[TMP0]], align 8
// CHECK-NEXT: store <8 x i8> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP2]], i64 0)
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
fixed_bool4_t address_of_array_idx_bool4() {
  fixed_bool4_t arr[3];
  fixed_bool4_t *parr;
  parr = &arr[0];
  return *parr;
}
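
// For fixed_bool32_t the <1 x i8> return value is coerced through memory
// (memcpy into a <vscale x 2 x i1> stack slot) instead of a vector bitcast.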
// CHECK-LABEL: @address_of_array_idx_bool32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <1 x i8>, align 1
// CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <1 x i8>], align 1
// CHECK-NEXT: [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[TMP0]], align 1
// CHECK-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL]], align 1
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[RETVAL_COERCE]], ptr align 1 [[RETVAL]], i64 1, i1 false)
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
// CHECK-NEXT: ret <vscale x 2 x i1> [[TMP2]]
fixed_bool32_t address_of_array_idx_bool32() {
  fixed_bool32_t arr[3];
  fixed_bool32_t *parr;
  parr = &arr[0];
  return *parr;
}
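
// Test mixing a fixed-length global with a scalable argument in __riscv_vadd.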
// CHECK-LABEL: @test_cast(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT: store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, ptr @global_vec, align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP0]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i32> [[TMP1]], i64 8)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP3]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE1]]
fixed_int32m1_t test_cast(vint32m1_t vec) {
  return __riscv_vadd(global_vec, vec, __riscv_v_fixed_vlen/32);
}
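
// Test loading an LMUL=2 fixed-length vector through a pointer to a global.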
// CHECK-LABEL: @test_ptr_to_global_m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT: [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr @global_vec_m2, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr [[TMP0]], align 8
// CHECK-NEXT: store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE]]
fixed_int32m2_t test_ptr_to_global_m2() {
  fixed_int32m2_t *global_vec_ptr;
  global_vec_ptr = &global_vec_m2;
  return *global_vec_ptr;
}

// Test casting pointer from fixed-length array to scalable vector.
// CHECK-LABEL: @array_arg_m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <16 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr [[ARRAYIDX]], align 8
// CHECK-NEXT: store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE]]
fixed_int32m2_t array_arg_m2(fixed_int32m2_t arr[]) {
  return arr[0];
}
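
// Test mixing an LMUL=2 fixed-length global with a scalable argument.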
// CHECK-LABEL: @test_cast_m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @global_vec_m2, align 8
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP0]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[CAST_SCALABLE]], <vscale x 4 x i32> [[TMP1]], i64 16)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: store <16 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP3]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE1]]
fixed_int32m2_t test_cast_m2(vint32m2_t vec) {
  return __riscv_vadd(global_vec_m2, vec, __riscv_v_fixed_vlen/16);
}