// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
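// The tests below cover the masked (_m) overloads of the vle8ff
// fault-only-first load at every LMUL, for both signed and unsigned
// element types. Each overload is expected to lower to
// @llvm.riscv.vleff.mask with a poison passthru and a trailing policy
// operand of i64 3 (tail agnostic, mask agnostic), storing the number
// of elements actually loaded through new_vl.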
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vle8ff_v_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vle8ff_v_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vle8ff_v_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vle8ff_v_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vle8ff_v_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vle8ff_v_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vle8ff_v_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vle8ff_v_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vle8ff_v_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vle8ff_v_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vle8ff_v_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vle8ff_v_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vle8ff_v_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vle8ff_v_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vle8ff(mask, base, new_vl, vl);
}