clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// NOTE: These tests are kept separate from vsmul.c because the eew=64
// variants are only enabled when the full V extension is specified (not for
// Zve).

#include <riscv_vector.h>
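
// Reader's note: in the unmasked tests below, the overloaded __riscv_vsmul
// call lowers to the llvm.riscv.vsmul.* intrinsic. The leading poison operand
// is the passthru vector, the trailing i64 0 operand is the fixed-point
// rounding mode (__RISCV_VXRM_RNU == 0), and the final i64 operand is the
// vector length.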

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
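
// Reader's note: the masked (_m) overloads below take the mask as the first
// argument and lower to the llvm.riscv.vsmul.mask.* intrinsics. The extra
// trailing i64 3 operand is the policy: tail agnostic, mask agnostic (TAMA),
// the default for the plain _m overloads.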

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
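
// Usage sketch (illustrative, not part of the checked test): a Q63 fixed-point
// multiply built on one of the overloads exercised above. Guarding on the
// __riscv_v architecture test macro mirrors the note at the top of the file;
// treating that macro alone as sufficient for the eew=64 forms is an
// assumption made for illustration.
#if defined(__riscv_v)
static inline vint64m1_t q63_mul_rnu(vint64m1_t a, vint64m1_t b, size_t vl) {
  // vsmul computes (a * b) >> 63 with saturation, rounding to nearest up.
  return __riscv_vsmul(a, b, __RISCV_VXRM_RNU, vl);
}
#endif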