// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfhmin -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-ZVFHMIN %s

#include <riscv_vector.h>
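
// Zvfhmin legalizes only the f16<->f32 conversions (vfwcvt.f.f.v and
// vfncvt.f.f.w) plus f16 loads, stores, moves, merges, and reinterprets;
// f16 arithmetic still requires full Zvfh. Each test below exercises one
// of those intrinsics with only +zvfhmin enabled.
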
// CHECK-ZVFHMIN-LABEL: @test_vfncvt_f_f_w_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> poison, <vscale x 4 x float> [[SRC:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
  return __riscv_vfncvt_f(src, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vfwcvt_f_f_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f16m1(vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f(src, vl);
}

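// Unit-stride loads and stores of f16 elements.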
// CHECK-ZVFHMIN-LABEL: @test_vle16_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) {
  return __riscv_vle16_v_f16m1(base, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vse16_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret void
//
void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
  return __riscv_vse16_v_f16m1(base, value, vl);
}

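// Strided variants: bstride is the distance in bytes between consecutive
// elements.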
// CHECK-ZVFHMIN-LABEL: @test_vlse16_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlse16_v_f16m1(base, bstride, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vsse16_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    call void @llvm.riscv.vsse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret void
//
void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
  return __riscv_vsse16_v_f16m1(base, bstride, value, vl);
}

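// Indexed variants: bindex holds unsigned byte offsets from base; ei32
// selects 32-bit indices, ux/ox select unordered/ordered accesses.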
// CHECK-ZVFHMIN-LABEL: @test_vluxei32_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
  return __riscv_vluxei32_v_f16m1(base, bindex, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vsuxei32_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret void
//
void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
  return __riscv_vsuxei32_v_f16m1(base, bindex, value, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vloxei32_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
  return __riscv_vloxei32_v_f16m1(base, bindex, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vsoxei32_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret void
//
void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
  return __riscv_vsoxei32_v_f16m1(base, bindex, value, vl);
}

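// Fault-only-first load: the number of elements actually loaded is written
// back through new_vl.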
// CHECK-ZVFHMIN-LABEL: @test_vle16ff_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP0]], 0
// CHECK-ZVFHMIN-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP0]], 1
// CHECK-ZVFHMIN-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16ff_v_f16m1(const _Float16 *base, size_t *new_vl, size_t vl) {
  return __riscv_vle16ff_v_f16m1(base, new_vl, vl);
}

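// Segment loads de-interleave two-field segments into a vfloat16m1x2_t tuple.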
// CHECK-ZVFHMIN-LABEL: @test_vlseg2e16_v_f16m1x2(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
//
vfloat16m1x2_t test_vlseg2e16_v_f16m1x2(const _Float16 *base, size_t vl) {
  return __riscv_vlseg2e16_v_f16m1x2(base, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vlseg2e16ff_v_f16m1x2(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
// CHECK-ZVFHMIN-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[TMP1]], 0
// CHECK-ZVFHMIN-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
// CHECK-ZVFHMIN-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[TMP3]], 1
// CHECK-ZVFHMIN-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
// CHECK-ZVFHMIN-NEXT:    store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8
// CHECK-ZVFHMIN-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
//
vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2(const _Float16 *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_v_f16m1x2(base, new_vl, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vlsseg2e16_v_f16m1x2(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
//
vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e16_v_f16m1x2(base, bstride, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vluxseg2ei32_v_f16m1x2(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
//
vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
  return __riscv_vluxseg2ei32_v_f16m1x2(base, bindex, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vloxseg2ei32_v_f16m1x2(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
//
vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
  return __riscv_vloxseg2ei32_v_f16m1x2(base, bindex, vl);
}

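// vmerge and vmv.v.v move f16 data without interpreting it, so they stay
// available under Zvfhmin.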
// CHECK-ZVFHMIN-LABEL: @test_vmerge_vvm_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge(op1, op2, mask, vl);
}

// CHECK-ZVFHMIN-LABEL: @test_vmv_v_v_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) {
  return __riscv_vmv_v(src, vl);
}

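// Reinterprets between f16 and same-width integer vectors lower to plain
// bitcasts; no vector instruction is emitted.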
// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_f16m1_i16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) {
  return __riscv_vreinterpret_v_f16m1_i16m1(src);
}

// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_f16m1_u16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) {
  return __riscv_vreinterpret_v_f16m1_u16m1(src);
}

// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_i16m1_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) {
  return __riscv_vreinterpret_v_i16m1_f16m1(src);
}

// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_u16m1_f16m1(
// CHECK-ZVFHMIN-NEXT:  entry:
// CHECK-ZVFHMIN-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
// CHECK-ZVFHMIN-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) {
  return __riscv_vreinterpret_v_u16m1_f16m1(src);
}