// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
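
// This file exercises the overloaded, non-policy masked (_m) variants of
// __riscv_vle32 for f32, i32, and u32 elements across LMUL mf2 through m8;
// the overload is resolved from the mask and pointer argument types. As the
// CHECK lines show, each call lowers to an @llvm.riscv.vle.mask intrinsic
// with a poison passthru and a trailing policy operand of 3, i.e.
// tail-agnostic and mask-agnostic.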
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vle32_v_f32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32.i64(<vscale x 1 x float> poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vle32_v_f32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32.i64(<vscale x 2 x float> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vle32_v_f32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32.i64(<vscale x 4 x float> poison, ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vle32_v_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32.i64(<vscale x 8 x float> poison, ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vle32_v_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32.i64(<vscale x 16 x float> poison, ptr [[BASE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vle32_v_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vle32_v_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vle32_v_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vle32_v_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vle32_v_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vle32_v_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vle32_v_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vle32_v_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vle32_v_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vle32_v_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) {
  return __riscv_vle32(mask, base, vl);
}