// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

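// This file exercises the tail/mask policy variants of the overloaded
// __riscv_vsext_vf8 intrinsic (_tu, _tum, _tumu, _mu). As the checks below
// show, the masked builtins lower to @llvm.riscv.vsext.mask.* with a trailing
// policy operand (bit 0 = tail agnostic, bit 1 = mask agnostic): 2 for _tum,
// 0 for _tumu, 1 for _mu. The unmasked _tu form instead encodes
// tail-undisturbed by passing maskedoff as the first operand.
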
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsext_vf8_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsext_vf8_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsext_vf8_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsext_vf8_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

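// _tum variants: tail undisturbed, mask agnostic (policy operand 2).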
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsext_vf8_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsext_vf8_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsext_vf8_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsext_vf8_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

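// _tumu variants: tail undisturbed, mask undisturbed (policy operand 0).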
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsext_vf8_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsext_vf8_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsext_vf8_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsext_vf8_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

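// _mu variants: tail agnostic, mask undisturbed (policy operand 1).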
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsext_vf8_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsext_vf8_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsext_vf8_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsext_vf8_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}