; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16>, i1)

define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: vabs_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
  ret <vscale x 1 x i16> %r
}

declare <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16>, i1)

define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: vabs_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
  ret <vscale x 2 x i16> %r
}

declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)

define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: vabs_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
  ret <vscale x 4 x i16> %r
}

declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)

define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
; CHECK-LABEL: vabs_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
  ret <vscale x 8 x i16> %r
}

declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)

define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
; CHECK-LABEL: vabs_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
  ret <vscale x 16 x i16> %r
}

declare <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16>, i1)

define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
; CHECK-LABEL: vabs_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
  ret <vscale x 32 x i16> %r
}

declare <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32>, i1)

define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: vabs_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
  ret <vscale x 1 x i32> %r
}

declare <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32>, i1)

define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: vabs_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
  ret <vscale x 2 x i32> %r
}

declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)

define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: vabs_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
  ret <vscale x 4 x i32> %r
}

declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)

define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
; CHECK-LABEL: vabs_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
  ret <vscale x 8 x i32> %r
}

declare <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32>, i1)

define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
; CHECK-LABEL: vabs_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
  ret <vscale x 16 x i32> %r
}

declare <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64>, i1)

define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: vabs_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
  ret <vscale x 1 x i64> %r
}

declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)

define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: vabs_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
  ret <vscale x 2 x i64> %r
}

declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)

define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: vabs_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
  ret <vscale x 4 x i64> %r
}

declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)

define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
; CHECK-LABEL: vabs_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
  ret <vscale x 8 x i64> %r
}