; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 8 x i7> @llvm.vp.smax.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vmax_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsra.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vsra.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.smax.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}
declare <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmax_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmax_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i8_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmax_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
declare <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmax_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmax_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)

define <vscale x 3 x i8> @vmax_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmax_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmax_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmax_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}
declare <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vmax_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmax_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmax_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
declare <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vmax_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmax_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmax_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
declare <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vmax_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmax_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmax_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
declare <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vmax_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmax_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmax_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
declare <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vmax_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmax_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmax_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
; Test that split-legalization works when the mask itself needs splitting.
declare <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)

define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub a3, a2, a1
; CHECK-NEXT:    sltu a4, a2, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
  ret <vscale x 128 x i8> %v
}

define <vscale x 128 x i8> @vmax_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v16, v16, a0
; CHECK-NEXT:    bltu a1, a2, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 128 x i8> %v
}
declare <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vmax_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmax_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmax_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
declare <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vmax_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmax_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmax_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vmax_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmax_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmax_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
declare <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vmax_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmax_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmax_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
declare <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vmax_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmax_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmax_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
; vp.smax tests for <vscale x 32 x i16>: expect e16/m8 vsetvli from %evl.
694 declare <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
; Masked vector-vector form.
696 define <vscale x 32 x i16> @vmax_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
697 ; CHECK-LABEL: vmax_vv_nxv32i16:
699 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
700 ; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
702 %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
703 ret <vscale x 32 x i16> %v
; All-ones mask lowers to the unmasked vmax.vv.
706 define <vscale x 32 x i16> @vmax_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
707 ; CHECK-LABEL: vmax_vv_nxv32i16_unmasked:
709 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
710 ; CHECK-NEXT: vmax.vv v8, v8, v16
712 %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
713 ret <vscale x 32 x i16> %v
; Scalar splat operand selects vmax.vx, masked.
716 define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
717 ; CHECK-LABEL: vmax_vx_nxv32i16:
719 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
720 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
722 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
723 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
724 %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
725 ret <vscale x 32 x i16> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
728 define <vscale x 32 x i16> @vmax_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
729 ; CHECK-LABEL: vmax_vx_nxv32i16_unmasked:
731 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
732 ; CHECK-NEXT: vmax.vx v8, v8, a0
734 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
735 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
736 %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
737 ret <vscale x 32 x i16> %v
; vp.smax tests for <vscale x 1 x i32>: expect e32/mf2 vsetvli from %evl.
740 declare <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
; Masked vector-vector form.
742 define <vscale x 1 x i32> @vmax_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
743 ; CHECK-LABEL: vmax_vv_nxv1i32:
745 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
746 ; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
748 %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
749 ret <vscale x 1 x i32> %v
; All-ones mask lowers to the unmasked vmax.vv.
752 define <vscale x 1 x i32> @vmax_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
753 ; CHECK-LABEL: vmax_vv_nxv1i32_unmasked:
755 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
756 ; CHECK-NEXT: vmax.vv v8, v8, v9
758 %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
759 ret <vscale x 1 x i32> %v
; Scalar splat operand selects vmax.vx, masked.
762 define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
763 ; CHECK-LABEL: vmax_vx_nxv1i32:
765 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
766 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
768 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
769 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
770 %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
771 ret <vscale x 1 x i32> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
774 define <vscale x 1 x i32> @vmax_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
775 ; CHECK-LABEL: vmax_vx_nxv1i32_unmasked:
777 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
778 ; CHECK-NEXT: vmax.vx v8, v8, a0
780 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
781 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
782 %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
783 ret <vscale x 1 x i32> %v
; vp.smax tests for <vscale x 2 x i32>: expect e32/m1 vsetvli from %evl.
786 declare <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
; Masked vector-vector form.
788 define <vscale x 2 x i32> @vmax_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
789 ; CHECK-LABEL: vmax_vv_nxv2i32:
791 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
792 ; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
794 %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
795 ret <vscale x 2 x i32> %v
; All-ones mask lowers to the unmasked vmax.vv.
798 define <vscale x 2 x i32> @vmax_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
799 ; CHECK-LABEL: vmax_vv_nxv2i32_unmasked:
801 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
802 ; CHECK-NEXT: vmax.vv v8, v8, v9
804 %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
805 ret <vscale x 2 x i32> %v
; Scalar splat operand selects vmax.vx, masked.
808 define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
809 ; CHECK-LABEL: vmax_vx_nxv2i32:
811 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
812 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
814 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
815 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
816 %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
817 ret <vscale x 2 x i32> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
820 define <vscale x 2 x i32> @vmax_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
821 ; CHECK-LABEL: vmax_vx_nxv2i32_unmasked:
823 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
824 ; CHECK-NEXT: vmax.vx v8, v8, a0
826 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
827 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
828 %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
829 ret <vscale x 2 x i32> %v
; vp.smax tests for <vscale x 4 x i32>: expect e32/m2 vsetvli from %evl.
832 declare <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
; Masked vector-vector form.
834 define <vscale x 4 x i32> @vmax_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
835 ; CHECK-LABEL: vmax_vv_nxv4i32:
837 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
838 ; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
840 %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
841 ret <vscale x 4 x i32> %v
; All-ones mask lowers to the unmasked vmax.vv.
844 define <vscale x 4 x i32> @vmax_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
845 ; CHECK-LABEL: vmax_vv_nxv4i32_unmasked:
847 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
848 ; CHECK-NEXT: vmax.vv v8, v8, v10
850 %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
851 ret <vscale x 4 x i32> %v
; Scalar splat operand selects vmax.vx, masked.
854 define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
855 ; CHECK-LABEL: vmax_vx_nxv4i32:
857 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
858 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
860 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
861 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
862 %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
863 ret <vscale x 4 x i32> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
866 define <vscale x 4 x i32> @vmax_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
867 ; CHECK-LABEL: vmax_vx_nxv4i32_unmasked:
869 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
870 ; CHECK-NEXT: vmax.vx v8, v8, a0
872 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
873 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
874 %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
875 ret <vscale x 4 x i32> %v
; vp.smax tests for <vscale x 8 x i32>: expect e32/m4 vsetvli from %evl.
878 declare <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
; Masked vector-vector form.
880 define <vscale x 8 x i32> @vmax_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
881 ; CHECK-LABEL: vmax_vv_nxv8i32:
883 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
884 ; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
886 %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
887 ret <vscale x 8 x i32> %v
; All-ones mask lowers to the unmasked vmax.vv.
890 define <vscale x 8 x i32> @vmax_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
891 ; CHECK-LABEL: vmax_vv_nxv8i32_unmasked:
893 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
894 ; CHECK-NEXT: vmax.vv v8, v8, v12
896 %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
897 ret <vscale x 8 x i32> %v
; Scalar splat operand selects vmax.vx, masked.
900 define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
901 ; CHECK-LABEL: vmax_vx_nxv8i32:
903 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
904 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
906 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
907 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
908 %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
909 ret <vscale x 8 x i32> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
912 define <vscale x 8 x i32> @vmax_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
913 ; CHECK-LABEL: vmax_vx_nxv8i32_unmasked:
915 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
916 ; CHECK-NEXT: vmax.vx v8, v8, a0
918 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
919 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
920 %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
921 ret <vscale x 8 x i32> %v
; vp.smax tests for <vscale x 16 x i32>: expect e32/m8 vsetvli from %evl.
924 declare <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
; Masked vector-vector form.
926 define <vscale x 16 x i32> @vmax_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
927 ; CHECK-LABEL: vmax_vv_nxv16i32:
929 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
930 ; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
932 %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
933 ret <vscale x 16 x i32> %v
; All-ones mask lowers to the unmasked vmax.vv.
936 define <vscale x 16 x i32> @vmax_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
937 ; CHECK-LABEL: vmax_vv_nxv16i32_unmasked:
939 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
940 ; CHECK-NEXT: vmax.vv v8, v8, v16
942 %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
943 ret <vscale x 16 x i32> %v
; Scalar splat operand selects vmax.vx, masked.
946 define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
947 ; CHECK-LABEL: vmax_vx_nxv16i32:
949 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
950 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
952 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
953 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
954 %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
955 ret <vscale x 16 x i32> %v
; Scalar splat with all-ones mask: unmasked vmax.vx.
958 define <vscale x 16 x i32> @vmax_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
959 ; CHECK-LABEL: vmax_vx_nxv16i32_unmasked:
961 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
962 ; CHECK-NEXT: vmax.vx v8, v8, a0
964 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
965 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
966 %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
967 ret <vscale x 16 x i32> %v
970 ; Test that split-legalization works when the mask needs manual splitting.
972 declare <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
; nxv32i32 exceeds the maximum LMUL=8 register group, so the operation is
; split into two m8 halves. The mask is split manually: the original mask is
; saved (vmv1r.v v24, v0) and the upper half is extracted with vslidedown.vx.
; The high half's EVL is computed as max(evl - 2*vlenb/2, 0) via the
; sltu/addi/and sequence; the low half's EVL is min(evl, vlmax) via bltu/mv.
974 define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
975 ; CHECK-LABEL: vmax_vx_nxv32i32:
977 ; CHECK-NEXT: vmv1r.v v24, v0
978 ; CHECK-NEXT: csrr a2, vlenb
979 ; CHECK-NEXT: srli a3, a2, 2
980 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
981 ; CHECK-NEXT: vslidedown.vx v0, v0, a3
982 ; CHECK-NEXT: slli a2, a2, 1
983 ; CHECK-NEXT: sub a3, a1, a2
984 ; CHECK-NEXT: sltu a4, a1, a3
985 ; CHECK-NEXT: addi a4, a4, -1
986 ; CHECK-NEXT: and a3, a4, a3
987 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
988 ; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
989 ; CHECK-NEXT: bltu a1, a2, .LBB80_2
990 ; CHECK-NEXT: # %bb.1:
991 ; CHECK-NEXT: mv a1, a2
992 ; CHECK-NEXT: .LBB80_2:
993 ; CHECK-NEXT: vmv1r.v v0, v24
994 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
995 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
997 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
998 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
999 %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
1000 ret <vscale x 32 x i32> %v
; Same split-legalization as above but with an all-ones mask: no mask save /
; vslidedown is needed, only the two-halves EVL computation and two
; unmasked vmax.vx instructions.
1003 define <vscale x 32 x i32> @vmax_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 %b, i32 zeroext %evl) {
1004 ; CHECK-LABEL: vmax_vx_nxv32i32_unmasked:
1006 ; CHECK-NEXT: csrr a2, vlenb
1007 ; CHECK-NEXT: slli a2, a2, 1
1008 ; CHECK-NEXT: sub a3, a1, a2
1009 ; CHECK-NEXT: sltu a4, a1, a3
1010 ; CHECK-NEXT: addi a4, a4, -1
1011 ; CHECK-NEXT: and a3, a4, a3
1012 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1013 ; CHECK-NEXT: vmax.vx v16, v16, a0
1014 ; CHECK-NEXT: bltu a1, a2, .LBB81_2
1015 ; CHECK-NEXT: # %bb.1:
1016 ; CHECK-NEXT: mv a1, a2
1017 ; CHECK-NEXT: .LBB81_2:
1018 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1019 ; CHECK-NEXT: vmax.vx v8, v8, a0
1021 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1022 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1023 %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
1024 ret <vscale x 32 x i32> %v
1027 ; Test splitting when the %evl is a constant (albeit an unknown one).
1029 declare i32 @llvm.vscale.i32()
1031 ; FIXME: The upper half of the operation is doing nothing.
1032 ; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
; EVL is vscale*8 (computed from llvm.vscale), i.e. a known multiple of vlenb.
; Codegen still emits the full runtime split (vslidedown + sltu/addi/and
; clamp + branch) even though, as the FIXMEs above note, the upper half is
; dead and the vscale-vs-vscale comparison should constant-fold.
1034 define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1035 ; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8:
1037 ; CHECK-NEXT: vmv1r.v v24, v0
1038 ; CHECK-NEXT: csrr a1, vlenb
1039 ; CHECK-NEXT: srli a2, a1, 2
1040 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
1041 ; CHECK-NEXT: vslidedown.vx v0, v0, a2
1042 ; CHECK-NEXT: slli a2, a1, 1
1043 ; CHECK-NEXT: sub a3, a1, a2
1044 ; CHECK-NEXT: sltu a4, a1, a3
1045 ; CHECK-NEXT: addi a4, a4, -1
1046 ; CHECK-NEXT: and a3, a4, a3
1047 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1048 ; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
1049 ; CHECK-NEXT: bltu a1, a2, .LBB82_2
1050 ; CHECK-NEXT: # %bb.1:
1051 ; CHECK-NEXT: mv a1, a2
1052 ; CHECK-NEXT: .LBB82_2:
1053 ; CHECK-NEXT: vmv1r.v v0, v24
1054 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1055 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
1057 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1058 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1059 %evl = call i32 @llvm.vscale.i32()
1060 %evl0 = mul i32 %evl, 8
1061 %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1062 ret <vscale x 32 x i32> %v
1065 ; FIXME: The first vmax.vx should be able to infer that its AVL is equivalent to VLMAX.
1066 ; FIXME: The upper half of the operation is doing nothing but we don't catch
1067 ; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
1068 ; (the "original" %evl is the "and", due to known-bits issues with legalizing
1069 ; the i32 %evl to i64) and this isn't detected as 0.
1070 ; This could be resolved in the future with more detailed KnownBits analysis.
; EVL is vscale*16, exactly the element count of nxv32i32's low half. On RV32
; this folds to a single VLMAX operation; on RV64 legalization of the i32 EVL
; leaves a second (zero-length) operation on the upper half, as described in
; the FIXME comments above, hence the RV32/RV64 check prefixes diverge here.
1073 define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1074 ; RV32-LABEL: vmax_vx_nxv32i32_evl_nx16:
1076 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1077 ; RV32-NEXT: vmax.vx v8, v8, a0, v0.t
1080 ; RV64-LABEL: vmax_vx_nxv32i32_evl_nx16:
1082 ; RV64-NEXT: csrr a1, vlenb
1083 ; RV64-NEXT: srli a1, a1, 2
1084 ; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
1085 ; RV64-NEXT: vslidedown.vx v24, v0, a1
1086 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1087 ; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
1088 ; RV64-NEXT: vmv1r.v v0, v24
1089 ; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
1090 ; RV64-NEXT: vmax.vx v16, v16, a0, v0.t
1092 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1093 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1094 %evl = call i32 @llvm.vscale.i32()
1095 %evl0 = mul i32 %evl, 16
1096 %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1097 ret <vscale x 32 x i32> %v
; vp.smax tests for <vscale x 1 x i64> (e64/m1). The vx cases diverge per
; target: on RV32 an i64 scalar arrives in a GPR pair (a0/a1), so it is
; stored to the stack and splat via a zero-stride vlse64.v, then vmax.vv is
; used; RV64 keeps the scalar in one register and uses vmax.vx directly.
1100 declare <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
; Masked vector-vector form (same on both targets).
1102 define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1103 ; CHECK-LABEL: vmax_vv_nxv1i64:
1105 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1106 ; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
1108 %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
1109 ret <vscale x 1 x i64> %v
; All-ones mask lowers to the unmasked vmax.vv.
1112 define <vscale x 1 x i64> @vmax_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
1113 ; CHECK-LABEL: vmax_vv_nxv1i64_unmasked:
1115 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1116 ; CHECK-NEXT: vmax.vv v8, v8, v9
1118 %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1119 ret <vscale x 1 x i64> %v
; Masked scalar-splat case: RV32 stack-splat + vmax.vv, RV64 vmax.vx.
1122 define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1123 ; RV32-LABEL: vmax_vx_nxv1i64:
1125 ; RV32-NEXT: addi sp, sp, -16
1126 ; RV32-NEXT: .cfi_def_cfa_offset 16
1127 ; RV32-NEXT: sw a1, 12(sp)
1128 ; RV32-NEXT: sw a0, 8(sp)
1129 ; RV32-NEXT: addi a0, sp, 8
1130 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1131 ; RV32-NEXT: vlse64.v v9, (a0), zero
1132 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1133 ; RV32-NEXT: vmax.vv v8, v8, v9, v0.t
1134 ; RV32-NEXT: addi sp, sp, 16
1137 ; RV64-LABEL: vmax_vx_nxv1i64:
1139 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1140 ; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
1142 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1143 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1144 %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
1145 ret <vscale x 1 x i64> %v
; Unmasked scalar-splat case.
1148 define <vscale x 1 x i64> @vmax_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
1149 ; RV32-LABEL: vmax_vx_nxv1i64_unmasked:
1151 ; RV32-NEXT: addi sp, sp, -16
1152 ; RV32-NEXT: .cfi_def_cfa_offset 16
1153 ; RV32-NEXT: sw a1, 12(sp)
1154 ; RV32-NEXT: sw a0, 8(sp)
1155 ; RV32-NEXT: addi a0, sp, 8
1156 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1157 ; RV32-NEXT: vlse64.v v9, (a0), zero
1158 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1159 ; RV32-NEXT: vmax.vv v8, v8, v9
1160 ; RV32-NEXT: addi sp, sp, 16
1163 ; RV64-LABEL: vmax_vx_nxv1i64_unmasked:
1165 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1166 ; RV64-NEXT: vmax.vx v8, v8, a0
1168 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1169 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1170 %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1171 ret <vscale x 1 x i64> %v
; vp.smax tests for <vscale x 2 x i64> (e64/m2); RV32 splats the i64 scalar
; through the stack (zero-stride vlse64.v), RV64 uses vmax.vx.
1174 declare <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
; Masked vector-vector form.
1176 define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1177 ; CHECK-LABEL: vmax_vv_nxv2i64:
1179 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1180 ; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
1182 %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
1183 ret <vscale x 2 x i64> %v
; All-ones mask lowers to the unmasked vmax.vv.
1186 define <vscale x 2 x i64> @vmax_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
1187 ; CHECK-LABEL: vmax_vv_nxv2i64_unmasked:
1189 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1190 ; CHECK-NEXT: vmax.vv v8, v8, v10
1192 %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1193 ret <vscale x 2 x i64> %v
; Masked scalar-splat case.
1196 define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1197 ; RV32-LABEL: vmax_vx_nxv2i64:
1199 ; RV32-NEXT: addi sp, sp, -16
1200 ; RV32-NEXT: .cfi_def_cfa_offset 16
1201 ; RV32-NEXT: sw a1, 12(sp)
1202 ; RV32-NEXT: sw a0, 8(sp)
1203 ; RV32-NEXT: addi a0, sp, 8
1204 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1205 ; RV32-NEXT: vlse64.v v10, (a0), zero
1206 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1207 ; RV32-NEXT: vmax.vv v8, v8, v10, v0.t
1208 ; RV32-NEXT: addi sp, sp, 16
1211 ; RV64-LABEL: vmax_vx_nxv2i64:
1213 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1214 ; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
1216 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1217 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1218 %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
1219 ret <vscale x 2 x i64> %v
; Unmasked scalar-splat case.
1222 define <vscale x 2 x i64> @vmax_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
1223 ; RV32-LABEL: vmax_vx_nxv2i64_unmasked:
1225 ; RV32-NEXT: addi sp, sp, -16
1226 ; RV32-NEXT: .cfi_def_cfa_offset 16
1227 ; RV32-NEXT: sw a1, 12(sp)
1228 ; RV32-NEXT: sw a0, 8(sp)
1229 ; RV32-NEXT: addi a0, sp, 8
1230 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1231 ; RV32-NEXT: vlse64.v v10, (a0), zero
1232 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1233 ; RV32-NEXT: vmax.vv v8, v8, v10
1234 ; RV32-NEXT: addi sp, sp, 16
1237 ; RV64-LABEL: vmax_vx_nxv2i64_unmasked:
1239 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1240 ; RV64-NEXT: vmax.vx v8, v8, a0
1242 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1243 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1244 %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1245 ret <vscale x 2 x i64> %v
; vp.smax tests for <vscale x 4 x i64> (e64/m4); RV32 splats the i64 scalar
; through the stack (zero-stride vlse64.v), RV64 uses vmax.vx.
1248 declare <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
; Masked vector-vector form.
1250 define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1251 ; CHECK-LABEL: vmax_vv_nxv4i64:
1253 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1254 ; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
1256 %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1257 ret <vscale x 4 x i64> %v
; All-ones mask lowers to the unmasked vmax.vv.
1260 define <vscale x 4 x i64> @vmax_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1261 ; CHECK-LABEL: vmax_vv_nxv4i64_unmasked:
1263 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1264 ; CHECK-NEXT: vmax.vv v8, v8, v12
1266 %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1267 ret <vscale x 4 x i64> %v
; Masked scalar-splat case.
1270 define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1271 ; RV32-LABEL: vmax_vx_nxv4i64:
1273 ; RV32-NEXT: addi sp, sp, -16
1274 ; RV32-NEXT: .cfi_def_cfa_offset 16
1275 ; RV32-NEXT: sw a1, 12(sp)
1276 ; RV32-NEXT: sw a0, 8(sp)
1277 ; RV32-NEXT: addi a0, sp, 8
1278 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1279 ; RV32-NEXT: vlse64.v v12, (a0), zero
1280 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1281 ; RV32-NEXT: vmax.vv v8, v8, v12, v0.t
1282 ; RV32-NEXT: addi sp, sp, 16
1285 ; RV64-LABEL: vmax_vx_nxv4i64:
1287 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1288 ; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
1290 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1291 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1292 %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1293 ret <vscale x 4 x i64> %v
; Unmasked scalar-splat case.
1296 define <vscale x 4 x i64> @vmax_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1297 ; RV32-LABEL: vmax_vx_nxv4i64_unmasked:
1299 ; RV32-NEXT: addi sp, sp, -16
1300 ; RV32-NEXT: .cfi_def_cfa_offset 16
1301 ; RV32-NEXT: sw a1, 12(sp)
1302 ; RV32-NEXT: sw a0, 8(sp)
1303 ; RV32-NEXT: addi a0, sp, 8
1304 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1305 ; RV32-NEXT: vlse64.v v12, (a0), zero
1306 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1307 ; RV32-NEXT: vmax.vv v8, v8, v12
1308 ; RV32-NEXT: addi sp, sp, 16
1311 ; RV64-LABEL: vmax_vx_nxv4i64_unmasked:
1313 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1314 ; RV64-NEXT: vmax.vx v8, v8, a0
1316 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1317 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1318 %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1319 ret <vscale x 4 x i64> %v
; vp.smax tests for <vscale x 8 x i64> (e64/m8); RV32 splats the i64 scalar
; through the stack (zero-stride vlse64.v), RV64 uses vmax.vx.
1322 declare <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
; Masked vector-vector form.
1324 define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1325 ; CHECK-LABEL: vmax_vv_nxv8i64:
1327 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1328 ; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
1330 %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1331 ret <vscale x 8 x i64> %v
; All-ones mask lowers to the unmasked vmax.vv.
1334 define <vscale x 8 x i64> @vmax_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1335 ; CHECK-LABEL: vmax_vv_nxv8i64_unmasked:
1337 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1338 ; CHECK-NEXT: vmax.vv v8, v8, v16
1340 %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1341 ret <vscale x 8 x i64> %v
; Masked scalar-splat case.
1344 define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1345 ; RV32-LABEL: vmax_vx_nxv8i64:
1347 ; RV32-NEXT: addi sp, sp, -16
1348 ; RV32-NEXT: .cfi_def_cfa_offset 16
1349 ; RV32-NEXT: sw a1, 12(sp)
1350 ; RV32-NEXT: sw a0, 8(sp)
1351 ; RV32-NEXT: addi a0, sp, 8
1352 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1353 ; RV32-NEXT: vlse64.v v16, (a0), zero
1354 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1355 ; RV32-NEXT: vmax.vv v8, v8, v16, v0.t
1356 ; RV32-NEXT: addi sp, sp, 16
1359 ; RV64-LABEL: vmax_vx_nxv8i64:
1361 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1362 ; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
1364 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1365 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1366 %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1367 ret <vscale x 8 x i64> %v
; Unmasked scalar-splat case.
1370 define <vscale x 8 x i64> @vmax_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1371 ; RV32-LABEL: vmax_vx_nxv8i64_unmasked:
1373 ; RV32-NEXT: addi sp, sp, -16
1374 ; RV32-NEXT: .cfi_def_cfa_offset 16
1375 ; RV32-NEXT: sw a1, 12(sp)
1376 ; RV32-NEXT: sw a0, 8(sp)
1377 ; RV32-NEXT: addi a0, sp, 8
1378 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1379 ; RV32-NEXT: vlse64.v v16, (a0), zero
1380 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1381 ; RV32-NEXT: vmax.vv v8, v8, v16
1382 ; RV32-NEXT: addi sp, sp, 16
1385 ; RV64-LABEL: vmax_vx_nxv8i64_unmasked:
1387 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1388 ; RV64-NEXT: vmax.vx v8, v8, a0
1390 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1391 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1392 %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1393 ret <vscale x 8 x i64> %v