1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 8 x i7> @llvm.vp.smin.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
; Illegal 7-bit element type is legalized in e8: the CHECK lines sign-extend
; the vector operand in-register (vadd.vv v8, v8, v8 == shl by 1, then
; vsra.vi 1) before issuing the masked vmin.vx. The scalar %b is already
; sign-extended per its `signext` ABI attribute, so only the vector side is
; adjusted.
9 define <vscale x 8 x i7> @vmin_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vmin_vx_nxv8i7:
12 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
13 ; CHECK-NEXT: vadd.vv v8, v8, v8
14 ; CHECK-NEXT: vsra.vi v8, v8, 1
15 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
16 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
18 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
19 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
20 %v = call <vscale x 8 x i7> @llvm.vp.smin.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
21 ret <vscale x 8 x i7> %v
; <vscale x 1 x i8>: masked/unmasked vector-vector and vector-scalar (splatted
; %b) forms each lower to one vsetvli (e8, mf8) plus a single vmin.vv/vmin.vx;
; an all-true mask selects the unmasked instruction encoding.
24 declare <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
26 define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
27 ; CHECK-LABEL: vmin_vv_nxv1i8:
29 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
30 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
32 %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
33 ret <vscale x 1 x i8> %v
36 define <vscale x 1 x i8> @vmin_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
37 ; CHECK-LABEL: vmin_vv_nxv1i8_unmasked:
39 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
40 ; CHECK-NEXT: vmin.vv v8, v8, v9
42 %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
43 ret <vscale x 1 x i8> %v
46 define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
47 ; CHECK-LABEL: vmin_vx_nxv1i8:
49 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
50 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
52 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
53 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
54 %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
55 ret <vscale x 1 x i8> %v
; smin is commutative, so the splat-on-the-left operand order selects the
; same vmin.vx as the canonical order above (identical CHECK lines).
58 define <vscale x 1 x i8> @vmin_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
59 ; CHECK-LABEL: vmin_vx_nxv1i8_commute:
61 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
62 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
64 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
65 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
66 %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
67 ret <vscale x 1 x i8> %v
70 define <vscale x 1 x i8> @vmin_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
71 ; CHECK-LABEL: vmin_vx_nxv1i8_unmasked:
73 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
74 ; CHECK-NEXT: vmin.vx v8, v8, a0
76 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
77 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
78 %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
79 ret <vscale x 1 x i8> %v
; <vscale x 2 x i8>: same vv/vx, masked/unmasked pattern at vtype e8, mf4.
82 declare <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
84 define <vscale x 2 x i8> @vmin_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
85 ; CHECK-LABEL: vmin_vv_nxv2i8:
87 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
88 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
90 %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
91 ret <vscale x 2 x i8> %v
94 define <vscale x 2 x i8> @vmin_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
95 ; CHECK-LABEL: vmin_vv_nxv2i8_unmasked:
97 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
98 ; CHECK-NEXT: vmin.vv v8, v8, v9
100 %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
101 ret <vscale x 2 x i8> %v
104 define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
105 ; CHECK-LABEL: vmin_vx_nxv2i8:
107 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
108 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
110 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
111 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
112 %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
113 ret <vscale x 2 x i8> %v
116 define <vscale x 2 x i8> @vmin_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
117 ; CHECK-LABEL: vmin_vx_nxv2i8_unmasked:
119 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
120 ; CHECK-NEXT: vmin.vx v8, v8, a0
122 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
123 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
124 %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
125 ret <vscale x 2 x i8> %v
; <vscale x 3 x i8>: non-power-of-two element count; the CHECK lines use the
; same e8, mf2 vtype as the nxv4i8 tests below — presumably the type is
; widened to nxv4i8 during legalization (NOTE(review): mechanism inferred from
; the matching vtype, confirm against the legalizer).
128 declare <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
130 define <vscale x 3 x i8> @vmin_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
131 ; CHECK-LABEL: vmin_vv_nxv3i8:
133 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
134 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
136 %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
137 ret <vscale x 3 x i8> %v
140 define <vscale x 3 x i8> @vmin_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
141 ; CHECK-LABEL: vmin_vv_nxv3i8_unmasked:
143 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
144 ; CHECK-NEXT: vmin.vv v8, v8, v9
146 %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
147 ret <vscale x 3 x i8> %v
150 define <vscale x 3 x i8> @vmin_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
151 ; CHECK-LABEL: vmin_vx_nxv3i8:
153 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
154 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
156 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
157 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
158 %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
159 ret <vscale x 3 x i8> %v
162 define <vscale x 3 x i8> @vmin_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
163 ; CHECK-LABEL: vmin_vx_nxv3i8_unmasked:
165 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
166 ; CHECK-NEXT: vmin.vx v8, v8, a0
168 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
169 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
170 %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
171 ret <vscale x 3 x i8> %v
; <vscale x 4 x i8>: same vv/vx, masked/unmasked pattern at vtype e8, mf2.
174 declare <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
176 define <vscale x 4 x i8> @vmin_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
177 ; CHECK-LABEL: vmin_vv_nxv4i8:
179 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
180 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
182 %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
183 ret <vscale x 4 x i8> %v
186 define <vscale x 4 x i8> @vmin_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
187 ; CHECK-LABEL: vmin_vv_nxv4i8_unmasked:
189 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
190 ; CHECK-NEXT: vmin.vv v8, v8, v9
192 %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
193 ret <vscale x 4 x i8> %v
196 define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
197 ; CHECK-LABEL: vmin_vx_nxv4i8:
199 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
200 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
202 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
203 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
204 %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
205 ret <vscale x 4 x i8> %v
208 define <vscale x 4 x i8> @vmin_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
209 ; CHECK-LABEL: vmin_vx_nxv4i8_unmasked:
211 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
212 ; CHECK-NEXT: vmin.vx v8, v8, a0
214 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
215 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
216 %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
217 ret <vscale x 4 x i8> %v
; <vscale x 8 x i8>: same pattern at vtype e8, m1 (one full register per operand).
220 declare <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
222 define <vscale x 8 x i8> @vmin_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
223 ; CHECK-LABEL: vmin_vv_nxv8i8:
225 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
226 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
228 %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
229 ret <vscale x 8 x i8> %v
232 define <vscale x 8 x i8> @vmin_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
233 ; CHECK-LABEL: vmin_vv_nxv8i8_unmasked:
235 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
236 ; CHECK-NEXT: vmin.vv v8, v8, v9
238 %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
239 ret <vscale x 8 x i8> %v
242 define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
243 ; CHECK-LABEL: vmin_vx_nxv8i8:
245 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
246 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
248 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
249 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
250 %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
251 ret <vscale x 8 x i8> %v
254 define <vscale x 8 x i8> @vmin_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
255 ; CHECK-LABEL: vmin_vx_nxv8i8_unmasked:
257 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
258 ; CHECK-NEXT: vmin.vx v8, v8, a0
260 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
261 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
262 %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
263 ret <vscale x 8 x i8> %v
; <vscale x 16 x i8>: vtype e8, m2 — operands occupy 2-register groups, so the
; second vv operand is v10.
266 declare <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
268 define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
269 ; CHECK-LABEL: vmin_vv_nxv16i8:
271 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
272 ; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
274 %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
275 ret <vscale x 16 x i8> %v
278 define <vscale x 16 x i8> @vmin_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
279 ; CHECK-LABEL: vmin_vv_nxv16i8_unmasked:
281 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
282 ; CHECK-NEXT: vmin.vv v8, v8, v10
284 %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
285 ret <vscale x 16 x i8> %v
288 define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
289 ; CHECK-LABEL: vmin_vx_nxv16i8:
291 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
292 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
294 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
295 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
296 %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
297 ret <vscale x 16 x i8> %v
300 define <vscale x 16 x i8> @vmin_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
301 ; CHECK-LABEL: vmin_vx_nxv16i8_unmasked:
303 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
304 ; CHECK-NEXT: vmin.vx v8, v8, a0
306 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
307 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
308 %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
309 ret <vscale x 16 x i8> %v
; <vscale x 32 x i8>: vtype e8, m4 — 4-register groups; second vv operand is v12.
312 declare <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
314 define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
315 ; CHECK-LABEL: vmin_vv_nxv32i8:
317 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
318 ; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
320 %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
321 ret <vscale x 32 x i8> %v
324 define <vscale x 32 x i8> @vmin_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
325 ; CHECK-LABEL: vmin_vv_nxv32i8_unmasked:
327 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
328 ; CHECK-NEXT: vmin.vv v8, v8, v12
330 %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
331 ret <vscale x 32 x i8> %v
334 define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
335 ; CHECK-LABEL: vmin_vx_nxv32i8:
337 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
338 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
340 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
341 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
342 %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
343 ret <vscale x 32 x i8> %v
346 define <vscale x 32 x i8> @vmin_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
347 ; CHECK-LABEL: vmin_vx_nxv32i8_unmasked:
349 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
350 ; CHECK-NEXT: vmin.vx v8, v8, a0
352 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
353 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
354 %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
355 ret <vscale x 32 x i8> %v
; <vscale x 64 x i8>: vtype e8, m8 — the largest legal LMUL; second vv operand is v16.
358 declare <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
360 define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
361 ; CHECK-LABEL: vmin_vv_nxv64i8:
363 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
364 ; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
366 %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
367 ret <vscale x 64 x i8> %v
370 define <vscale x 64 x i8> @vmin_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
371 ; CHECK-LABEL: vmin_vv_nxv64i8_unmasked:
373 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
374 ; CHECK-NEXT: vmin.vv v8, v8, v16
376 %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
377 ret <vscale x 64 x i8> %v
380 define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
381 ; CHECK-LABEL: vmin_vx_nxv64i8:
383 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
384 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
386 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
387 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
388 %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
389 ret <vscale x 64 x i8> %v
392 define <vscale x 64 x i8> @vmin_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
393 ; CHECK-LABEL: vmin_vx_nxv64i8_unmasked:
395 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
396 ; CHECK-NEXT: vmin.vx v8, v8, a0
398 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
399 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
400 %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
401 ret <vscale x 64 x i8> %v
404 ; Test that split-legalization works when the mask itself needs splitting.
406 declare <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
; nxv128i8 is twice the largest legal m8 type, so the op is split in two:
; the low half's mask comes in v0 (saved to v24) and the high half's mask is
; loaded with vlm.v from the address in a1. vlenb << 3 is VLMAX for e8/m8;
; the sub/sltu/addi/and sequence computes the high half's EVL as
; max(evl - VLMAX, 0) (branchless saturating subtract), and the bltu/mv pair
; clamps the low half's EVL to VLMAX.
408 define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
409 ; CHECK-LABEL: vmin_vx_nxv128i8:
411 ; CHECK-NEXT: vmv1r.v v24, v0
412 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
413 ; CHECK-NEXT: vlm.v v0, (a1)
414 ; CHECK-NEXT: csrr a1, vlenb
415 ; CHECK-NEXT: slli a1, a1, 3
416 ; CHECK-NEXT: sub a3, a2, a1
417 ; CHECK-NEXT: sltu a4, a2, a3
418 ; CHECK-NEXT: addi a4, a4, -1
419 ; CHECK-NEXT: and a3, a4, a3
420 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
421 ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
422 ; CHECK-NEXT: bltu a2, a1, .LBB34_2
423 ; CHECK-NEXT: # %bb.1:
424 ; CHECK-NEXT: mv a2, a1
425 ; CHECK-NEXT: .LBB34_2:
426 ; CHECK-NEXT: vmv1r.v v0, v24
427 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
428 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
430 %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
431 %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
432 %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
433 ret <vscale x 128 x i8> %v
; Unmasked split: same EVL-splitting arithmetic, but no mask registers to
; shuffle, so both halves use the unmasked vmin.vx form.
436 define <vscale x 128 x i8> @vmin_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i8 %b, i32 zeroext %evl) {
437 ; CHECK-LABEL: vmin_vx_nxv128i8_unmasked:
439 ; CHECK-NEXT: csrr a2, vlenb
440 ; CHECK-NEXT: slli a2, a2, 3
441 ; CHECK-NEXT: sub a3, a1, a2
442 ; CHECK-NEXT: sltu a4, a1, a3
443 ; CHECK-NEXT: addi a4, a4, -1
444 ; CHECK-NEXT: and a3, a4, a3
445 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
446 ; CHECK-NEXT: vmin.vx v16, v16, a0
447 ; CHECK-NEXT: bltu a1, a2, .LBB35_2
448 ; CHECK-NEXT: # %bb.1:
449 ; CHECK-NEXT: mv a1, a2
450 ; CHECK-NEXT: .LBB35_2:
451 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
452 ; CHECK-NEXT: vmin.vx v8, v8, a0
454 %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
455 %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
456 %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
457 ret <vscale x 128 x i8> %v
; i16 element types follow: same vv/vx, masked/unmasked pattern with e16
; vtypes. <vscale x 1 x i16> uses e16, mf4.
460 declare <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
462 define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
463 ; CHECK-LABEL: vmin_vv_nxv1i16:
465 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
466 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
468 %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
469 ret <vscale x 1 x i16> %v
472 define <vscale x 1 x i16> @vmin_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
473 ; CHECK-LABEL: vmin_vv_nxv1i16_unmasked:
475 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
476 ; CHECK-NEXT: vmin.vv v8, v8, v9
478 %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
479 ret <vscale x 1 x i16> %v
482 define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
483 ; CHECK-LABEL: vmin_vx_nxv1i16:
485 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
486 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
488 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
489 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
490 %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
491 ret <vscale x 1 x i16> %v
494 define <vscale x 1 x i16> @vmin_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
495 ; CHECK-LABEL: vmin_vx_nxv1i16_unmasked:
497 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
498 ; CHECK-NEXT: vmin.vx v8, v8, a0
500 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
501 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
502 %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
503 ret <vscale x 1 x i16> %v
; <vscale x 2 x i16>: same pattern at vtype e16, mf2.
506 declare <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
508 define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
509 ; CHECK-LABEL: vmin_vv_nxv2i16:
511 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
512 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
514 %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
515 ret <vscale x 2 x i16> %v
518 define <vscale x 2 x i16> @vmin_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
519 ; CHECK-LABEL: vmin_vv_nxv2i16_unmasked:
521 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
522 ; CHECK-NEXT: vmin.vv v8, v8, v9
524 %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
525 ret <vscale x 2 x i16> %v
528 define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
529 ; CHECK-LABEL: vmin_vx_nxv2i16:
531 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
532 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
534 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
535 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
536 %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
537 ret <vscale x 2 x i16> %v
540 define <vscale x 2 x i16> @vmin_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
541 ; CHECK-LABEL: vmin_vx_nxv2i16_unmasked:
543 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
544 ; CHECK-NEXT: vmin.vx v8, v8, a0
546 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
547 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
548 %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
549 ret <vscale x 2 x i16> %v
; <vscale x 4 x i16>: same pattern at vtype e16, m1.
552 declare <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
554 define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
555 ; CHECK-LABEL: vmin_vv_nxv4i16:
557 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
558 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
560 %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
561 ret <vscale x 4 x i16> %v
564 define <vscale x 4 x i16> @vmin_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
565 ; CHECK-LABEL: vmin_vv_nxv4i16_unmasked:
567 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
568 ; CHECK-NEXT: vmin.vv v8, v8, v9
570 %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
571 ret <vscale x 4 x i16> %v
574 define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
575 ; CHECK-LABEL: vmin_vx_nxv4i16:
577 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
578 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
580 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
581 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
582 %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
583 ret <vscale x 4 x i16> %v
586 define <vscale x 4 x i16> @vmin_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
587 ; CHECK-LABEL: vmin_vx_nxv4i16_unmasked:
589 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
590 ; CHECK-NEXT: vmin.vx v8, v8, a0
592 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
593 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
594 %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
595 ret <vscale x 4 x i16> %v
; <vscale x 8 x i16>: vtype e16, m2; second vv operand is v10.
598 declare <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
600 define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
601 ; CHECK-LABEL: vmin_vv_nxv8i16:
603 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
604 ; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
606 %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
607 ret <vscale x 8 x i16> %v
610 define <vscale x 8 x i16> @vmin_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
611 ; CHECK-LABEL: vmin_vv_nxv8i16_unmasked:
613 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
614 ; CHECK-NEXT: vmin.vv v8, v8, v10
616 %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
617 ret <vscale x 8 x i16> %v
620 define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
621 ; CHECK-LABEL: vmin_vx_nxv8i16:
623 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
624 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
626 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
627 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
628 %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
629 ret <vscale x 8 x i16> %v
632 define <vscale x 8 x i16> @vmin_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
633 ; CHECK-LABEL: vmin_vx_nxv8i16_unmasked:
635 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
636 ; CHECK-NEXT: vmin.vx v8, v8, a0
638 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
639 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
640 %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
641 ret <vscale x 8 x i16> %v
; <vscale x 16 x i16>: vtype e16, m4; second vv operand is v12.
644 declare <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
646 define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
647 ; CHECK-LABEL: vmin_vv_nxv16i16:
649 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
650 ; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
652 %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
653 ret <vscale x 16 x i16> %v
656 define <vscale x 16 x i16> @vmin_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
657 ; CHECK-LABEL: vmin_vv_nxv16i16_unmasked:
659 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
660 ; CHECK-NEXT: vmin.vv v8, v8, v12
662 %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
663 ret <vscale x 16 x i16> %v
666 define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
667 ; CHECK-LABEL: vmin_vx_nxv16i16:
669 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
670 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
672 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
673 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
674 %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
675 ret <vscale x 16 x i16> %v
678 define <vscale x 16 x i16> @vmin_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
679 ; CHECK-LABEL: vmin_vx_nxv16i16_unmasked:
681 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
682 ; CHECK-NEXT: vmin.vx v8, v8, a0
684 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
685 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
686 %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
687 ret <vscale x 16 x i16> %v
; vp.smin tests for <vscale x 32 x i16> (e16/m8): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
690 declare <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
692 define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
693 ; CHECK-LABEL: vmin_vv_nxv32i16:
695 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
696 ; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
698 %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
699 ret <vscale x 32 x i16> %v
702 define <vscale x 32 x i16> @vmin_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
703 ; CHECK-LABEL: vmin_vv_nxv32i16_unmasked:
705 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
706 ; CHECK-NEXT: vmin.vv v8, v8, v16
708 %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
709 ret <vscale x 32 x i16> %v
712 define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
713 ; CHECK-LABEL: vmin_vx_nxv32i16:
715 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
716 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
718 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
719 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
720 %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
721 ret <vscale x 32 x i16> %v
724 define <vscale x 32 x i16> @vmin_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
725 ; CHECK-LABEL: vmin_vx_nxv32i16_unmasked:
727 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
728 ; CHECK-NEXT: vmin.vx v8, v8, a0
730 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
731 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
732 %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
733 ret <vscale x 32 x i16> %v
; vp.smin tests for <vscale x 1 x i32> (e32/mf2): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
736 declare <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
738 define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
739 ; CHECK-LABEL: vmin_vv_nxv1i32:
741 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
742 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
744 %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
745 ret <vscale x 1 x i32> %v
748 define <vscale x 1 x i32> @vmin_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
749 ; CHECK-LABEL: vmin_vv_nxv1i32_unmasked:
751 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
752 ; CHECK-NEXT: vmin.vv v8, v8, v9
754 %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
755 ret <vscale x 1 x i32> %v
758 define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
759 ; CHECK-LABEL: vmin_vx_nxv1i32:
761 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
762 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
764 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
765 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
766 %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
767 ret <vscale x 1 x i32> %v
770 define <vscale x 1 x i32> @vmin_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
771 ; CHECK-LABEL: vmin_vx_nxv1i32_unmasked:
773 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
774 ; CHECK-NEXT: vmin.vx v8, v8, a0
776 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
777 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
778 %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
779 ret <vscale x 1 x i32> %v
; vp.smin tests for <vscale x 2 x i32> (e32/m1): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
782 declare <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
784 define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
785 ; CHECK-LABEL: vmin_vv_nxv2i32:
787 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
788 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
790 %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
791 ret <vscale x 2 x i32> %v
794 define <vscale x 2 x i32> @vmin_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
795 ; CHECK-LABEL: vmin_vv_nxv2i32_unmasked:
797 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
798 ; CHECK-NEXT: vmin.vv v8, v8, v9
800 %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
801 ret <vscale x 2 x i32> %v
804 define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
805 ; CHECK-LABEL: vmin_vx_nxv2i32:
807 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
808 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
810 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
811 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
812 %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
813 ret <vscale x 2 x i32> %v
816 define <vscale x 2 x i32> @vmin_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
817 ; CHECK-LABEL: vmin_vx_nxv2i32_unmasked:
819 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
820 ; CHECK-NEXT: vmin.vx v8, v8, a0
822 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
823 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
824 %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
825 ret <vscale x 2 x i32> %v
; vp.smin tests for <vscale x 4 x i32> (e32/m2): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
828 declare <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
830 define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
831 ; CHECK-LABEL: vmin_vv_nxv4i32:
833 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
834 ; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
836 %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
837 ret <vscale x 4 x i32> %v
840 define <vscale x 4 x i32> @vmin_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
841 ; CHECK-LABEL: vmin_vv_nxv4i32_unmasked:
843 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
844 ; CHECK-NEXT: vmin.vv v8, v8, v10
846 %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
847 ret <vscale x 4 x i32> %v
850 define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
851 ; CHECK-LABEL: vmin_vx_nxv4i32:
853 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
854 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
856 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
857 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
858 %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
859 ret <vscale x 4 x i32> %v
862 define <vscale x 4 x i32> @vmin_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
863 ; CHECK-LABEL: vmin_vx_nxv4i32_unmasked:
865 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
866 ; CHECK-NEXT: vmin.vx v8, v8, a0
868 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
869 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
870 %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
871 ret <vscale x 4 x i32> %v
; vp.smin tests for <vscale x 8 x i32> (e32/m4): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
874 declare <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
876 define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
877 ; CHECK-LABEL: vmin_vv_nxv8i32:
879 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
880 ; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
882 %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
883 ret <vscale x 8 x i32> %v
886 define <vscale x 8 x i32> @vmin_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
887 ; CHECK-LABEL: vmin_vv_nxv8i32_unmasked:
889 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
890 ; CHECK-NEXT: vmin.vv v8, v8, v12
892 %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
893 ret <vscale x 8 x i32> %v
896 define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
897 ; CHECK-LABEL: vmin_vx_nxv8i32:
899 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
900 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
902 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
903 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
904 %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
905 ret <vscale x 8 x i32> %v
908 define <vscale x 8 x i32> @vmin_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
909 ; CHECK-LABEL: vmin_vx_nxv8i32_unmasked:
911 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
912 ; CHECK-NEXT: vmin.vx v8, v8, a0
914 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
915 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
916 %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
917 ret <vscale x 8 x i32> %v
; vp.smin tests for <vscale x 16 x i32> (e32/m8): masked and unmasked
; (all-ones mask via splat (i1 true)) forms, with either a full vector
; (vmin.vv) or a splatted scalar (vmin.vx) as the second operand.
920 declare <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
922 define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
923 ; CHECK-LABEL: vmin_vv_nxv16i32:
925 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
926 ; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
928 %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
929 ret <vscale x 16 x i32> %v
932 define <vscale x 16 x i32> @vmin_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
933 ; CHECK-LABEL: vmin_vv_nxv16i32_unmasked:
935 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
936 ; CHECK-NEXT: vmin.vv v8, v8, v16
938 %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
939 ret <vscale x 16 x i32> %v
942 define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
943 ; CHECK-LABEL: vmin_vx_nxv16i32:
945 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
946 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
948 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
949 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
950 %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
951 ret <vscale x 16 x i32> %v
954 define <vscale x 16 x i32> @vmin_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
955 ; CHECK-LABEL: vmin_vx_nxv16i32_unmasked:
957 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
958 ; CHECK-NEXT: vmin.vx v8, v8, a0
960 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
961 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
962 %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
963 ret <vscale x 16 x i32> %v
966 ; Test that split-legalization works when the mask needs manual splitting.
968 declare <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
; <vscale x 32 x i32> exceeds one m8 register group, so the op is split
; into two m8 halves. The mask for the high half is obtained with
; vslidedown.vx; the high half's EVL is usubsat(%evl, 16*vscale) (the
; sub/sltu/addi/and sequence) and the low half's is min(%evl, 16*vscale)
; (the bltu/mv sequence).
970 define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
971 ; CHECK-LABEL: vmin_vx_nxv32i32:
973 ; CHECK-NEXT: vmv1r.v v24, v0
974 ; CHECK-NEXT: csrr a2, vlenb
975 ; CHECK-NEXT: srli a3, a2, 2
976 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
977 ; CHECK-NEXT: vslidedown.vx v0, v0, a3
978 ; CHECK-NEXT: slli a2, a2, 1
979 ; CHECK-NEXT: sub a3, a1, a2
980 ; CHECK-NEXT: sltu a4, a1, a3
981 ; CHECK-NEXT: addi a4, a4, -1
982 ; CHECK-NEXT: and a3, a4, a3
983 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
984 ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
985 ; CHECK-NEXT: bltu a1, a2, .LBB80_2
986 ; CHECK-NEXT: # %bb.1:
987 ; CHECK-NEXT: mv a1, a2
988 ; CHECK-NEXT: .LBB80_2:
989 ; CHECK-NEXT: vmv1r.v v0, v24
990 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
991 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
993 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
994 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
995 %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
996 ret <vscale x 32 x i32> %v
; Unmasked variant of the split-legalization test above: same two-half
; EVL computation, but no mask register to split or restore.
999 define <vscale x 32 x i32> @vmin_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 %b, i32 zeroext %evl) {
1000 ; CHECK-LABEL: vmin_vx_nxv32i32_unmasked:
1002 ; CHECK-NEXT: csrr a2, vlenb
1003 ; CHECK-NEXT: slli a2, a2, 1
1004 ; CHECK-NEXT: sub a3, a1, a2
1005 ; CHECK-NEXT: sltu a4, a1, a3
1006 ; CHECK-NEXT: addi a4, a4, -1
1007 ; CHECK-NEXT: and a3, a4, a3
1008 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1009 ; CHECK-NEXT: vmin.vx v16, v16, a0
1010 ; CHECK-NEXT: bltu a1, a2, .LBB81_2
1011 ; CHECK-NEXT: # %bb.1:
1012 ; CHECK-NEXT: mv a1, a2
1013 ; CHECK-NEXT: .LBB81_2:
1014 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1015 ; CHECK-NEXT: vmin.vx v8, v8, a0
1017 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1018 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1019 %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
1020 ret <vscale x 32 x i32> %v
1023 ; Test splitting when the %evl is a constant (albeit an unknown one).
1025 declare i32 @llvm.vscale.i32()
1027 ; FIXME: The upper half of the operation is doing nothing.
1028 ; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
; %evl = vscale*8, which always fits in the low m8 half (16*vscale
; elements), so the high-half vmin.vx is dead — see the FIXMEs above.
1030 define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1031 ; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
1033 ; CHECK-NEXT: vmv1r.v v24, v0
1034 ; CHECK-NEXT: csrr a1, vlenb
1035 ; CHECK-NEXT: srli a2, a1, 2
1036 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
1037 ; CHECK-NEXT: vslidedown.vx v0, v0, a2
1038 ; CHECK-NEXT: slli a2, a1, 1
1039 ; CHECK-NEXT: sub a3, a1, a2
1040 ; CHECK-NEXT: sltu a4, a1, a3
1041 ; CHECK-NEXT: addi a4, a4, -1
1042 ; CHECK-NEXT: and a3, a4, a3
1043 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1044 ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
1045 ; CHECK-NEXT: bltu a1, a2, .LBB82_2
1046 ; CHECK-NEXT: # %bb.1:
1047 ; CHECK-NEXT: mv a1, a2
1048 ; CHECK-NEXT: .LBB82_2:
1049 ; CHECK-NEXT: vmv1r.v v0, v24
1050 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1051 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
1053 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1054 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1055 %evl = call i32 @llvm.vscale.i32()
1056 %evl0 = mul i32 %evl, 8
1057 %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1058 ret <vscale x 32 x i32> %v
1061 ; FIXME: The first vmin.vx should be able to infer that its AVL is equivalent to VLMAX.
1062 ; FIXME: The upper half of the operation is doing nothing but we don't catch
1063 ; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
1064 ; (the "original" %evl is the "and", due to known-bits issues with legalizing
1065 ; the i32 %evl to i64) and this isn't detected as 0.
1066 ; This could be resolved in the future with more detailed KnownBits analysis.
; %evl = vscale*16, exactly the low m8 half. RV32 folds the whole op into
; a single masked vmin.vx; RV64 still emits the dead high-half op with an
; AVL of 0 (vsetivli zero, 0) for the reason described above.
1069 define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1070 ; RV32-LABEL: vmin_vx_nxv32i32_evl_nx16:
1072 ; RV32-NEXT: csrr a1, vlenb
1073 ; RV32-NEXT: slli a1, a1, 1
1074 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1075 ; RV32-NEXT: vmin.vx v8, v8, a0, v0.t
1078 ; RV64-LABEL: vmin_vx_nxv32i32_evl_nx16:
1080 ; RV64-NEXT: csrr a1, vlenb
1081 ; RV64-NEXT: srli a2, a1, 2
1082 ; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
1083 ; RV64-NEXT: vslidedown.vx v24, v0, a2
1084 ; RV64-NEXT: slli a1, a1, 1
1085 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1086 ; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
1087 ; RV64-NEXT: vmv1r.v v0, v24
1088 ; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
1089 ; RV64-NEXT: vmin.vx v16, v16, a0, v0.t
1091 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1092 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1093 %evl = call i32 @llvm.vscale.i32()
1094 %evl0 = mul i32 %evl, 16
1095 %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1096 ret <vscale x 32 x i32> %v
; vp.smin tests for <vscale x 1 x i64> (e64/m1). The scalar (vx) forms
; differ per target: RV64 passes the i64 in one GPR and uses vmin.vx,
; while RV32 spills the register pair to the stack, broadcasts it with a
; zero-stride vlse64.v, and uses vmin.vv.
1099 declare <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
1101 define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1102 ; CHECK-LABEL: vmin_vv_nxv1i64:
1104 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1105 ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
1107 %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
1108 ret <vscale x 1 x i64> %v
1111 define <vscale x 1 x i64> @vmin_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
1112 ; CHECK-LABEL: vmin_vv_nxv1i64_unmasked:
1114 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1115 ; CHECK-NEXT: vmin.vv v8, v8, v9
1117 %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1118 ret <vscale x 1 x i64> %v
1121 define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1122 ; RV32-LABEL: vmin_vx_nxv1i64:
1124 ; RV32-NEXT: addi sp, sp, -16
1125 ; RV32-NEXT: .cfi_def_cfa_offset 16
1126 ; RV32-NEXT: sw a1, 12(sp)
1127 ; RV32-NEXT: sw a0, 8(sp)
1128 ; RV32-NEXT: addi a0, sp, 8
1129 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1130 ; RV32-NEXT: vlse64.v v9, (a0), zero
1131 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1132 ; RV32-NEXT: vmin.vv v8, v8, v9, v0.t
1133 ; RV32-NEXT: addi sp, sp, 16
1136 ; RV64-LABEL: vmin_vx_nxv1i64:
1138 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1139 ; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
1141 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1142 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1143 %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
1144 ret <vscale x 1 x i64> %v
1147 define <vscale x 1 x i64> @vmin_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
1148 ; RV32-LABEL: vmin_vx_nxv1i64_unmasked:
1150 ; RV32-NEXT: addi sp, sp, -16
1151 ; RV32-NEXT: .cfi_def_cfa_offset 16
1152 ; RV32-NEXT: sw a1, 12(sp)
1153 ; RV32-NEXT: sw a0, 8(sp)
1154 ; RV32-NEXT: addi a0, sp, 8
1155 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1156 ; RV32-NEXT: vlse64.v v9, (a0), zero
1157 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1158 ; RV32-NEXT: vmin.vv v8, v8, v9
1159 ; RV32-NEXT: addi sp, sp, 16
1162 ; RV64-LABEL: vmin_vx_nxv1i64_unmasked:
1164 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1165 ; RV64-NEXT: vmin.vx v8, v8, a0
1167 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1168 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1169 %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1170 ret <vscale x 1 x i64> %v
; vp.smin tests for <vscale x 2 x i64> (e64/m2). As with nxv1i64, RV32
; lowers the scalar (vx) forms through a stack spill + zero-stride
; vlse64.v broadcast and vmin.vv; RV64 uses vmin.vx directly.
1173 declare <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
1175 define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1176 ; CHECK-LABEL: vmin_vv_nxv2i64:
1178 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1179 ; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
1181 %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
1182 ret <vscale x 2 x i64> %v
1185 define <vscale x 2 x i64> @vmin_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
1186 ; CHECK-LABEL: vmin_vv_nxv2i64_unmasked:
1188 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1189 ; CHECK-NEXT: vmin.vv v8, v8, v10
1191 %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1192 ret <vscale x 2 x i64> %v
1195 define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1196 ; RV32-LABEL: vmin_vx_nxv2i64:
1198 ; RV32-NEXT: addi sp, sp, -16
1199 ; RV32-NEXT: .cfi_def_cfa_offset 16
1200 ; RV32-NEXT: sw a1, 12(sp)
1201 ; RV32-NEXT: sw a0, 8(sp)
1202 ; RV32-NEXT: addi a0, sp, 8
1203 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1204 ; RV32-NEXT: vlse64.v v10, (a0), zero
1205 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1206 ; RV32-NEXT: vmin.vv v8, v8, v10, v0.t
1207 ; RV32-NEXT: addi sp, sp, 16
1210 ; RV64-LABEL: vmin_vx_nxv2i64:
1212 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1213 ; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
1215 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1216 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1217 %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
1218 ret <vscale x 2 x i64> %v
1221 define <vscale x 2 x i64> @vmin_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
1222 ; RV32-LABEL: vmin_vx_nxv2i64_unmasked:
1224 ; RV32-NEXT: addi sp, sp, -16
1225 ; RV32-NEXT: .cfi_def_cfa_offset 16
1226 ; RV32-NEXT: sw a1, 12(sp)
1227 ; RV32-NEXT: sw a0, 8(sp)
1228 ; RV32-NEXT: addi a0, sp, 8
1229 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1230 ; RV32-NEXT: vlse64.v v10, (a0), zero
1231 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1232 ; RV32-NEXT: vmin.vv v8, v8, v10
1233 ; RV32-NEXT: addi sp, sp, 16
1236 ; RV64-LABEL: vmin_vx_nxv2i64_unmasked:
1238 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1239 ; RV64-NEXT: vmin.vx v8, v8, a0
1241 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1242 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1243 %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1244 ret <vscale x 2 x i64> %v
; vp.smin tests for <vscale x 4 x i64> (e64/m4). As with nxv1i64, RV32
; lowers the scalar (vx) forms through a stack spill + zero-stride
; vlse64.v broadcast and vmin.vv; RV64 uses vmin.vx directly.
1247 declare <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1249 define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1250 ; CHECK-LABEL: vmin_vv_nxv4i64:
1252 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1253 ; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
1255 %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1256 ret <vscale x 4 x i64> %v
1259 define <vscale x 4 x i64> @vmin_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1260 ; CHECK-LABEL: vmin_vv_nxv4i64_unmasked:
1262 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1263 ; CHECK-NEXT: vmin.vv v8, v8, v12
1265 %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1266 ret <vscale x 4 x i64> %v
1269 define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1270 ; RV32-LABEL: vmin_vx_nxv4i64:
1272 ; RV32-NEXT: addi sp, sp, -16
1273 ; RV32-NEXT: .cfi_def_cfa_offset 16
1274 ; RV32-NEXT: sw a1, 12(sp)
1275 ; RV32-NEXT: sw a0, 8(sp)
1276 ; RV32-NEXT: addi a0, sp, 8
1277 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1278 ; RV32-NEXT: vlse64.v v12, (a0), zero
1279 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1280 ; RV32-NEXT: vmin.vv v8, v8, v12, v0.t
1281 ; RV32-NEXT: addi sp, sp, 16
1284 ; RV64-LABEL: vmin_vx_nxv4i64:
1286 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1287 ; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
1289 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1290 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1291 %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1292 ret <vscale x 4 x i64> %v
1295 define <vscale x 4 x i64> @vmin_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1296 ; RV32-LABEL: vmin_vx_nxv4i64_unmasked:
1298 ; RV32-NEXT: addi sp, sp, -16
1299 ; RV32-NEXT: .cfi_def_cfa_offset 16
1300 ; RV32-NEXT: sw a1, 12(sp)
1301 ; RV32-NEXT: sw a0, 8(sp)
1302 ; RV32-NEXT: addi a0, sp, 8
1303 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1304 ; RV32-NEXT: vlse64.v v12, (a0), zero
1305 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1306 ; RV32-NEXT: vmin.vv v8, v8, v12
1307 ; RV32-NEXT: addi sp, sp, 16
1310 ; RV64-LABEL: vmin_vx_nxv4i64_unmasked:
1312 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1313 ; RV64-NEXT: vmin.vx v8, v8, a0
1315 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1316 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1317 %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1318 ret <vscale x 4 x i64> %v
; vp.smin tests for <vscale x 8 x i64> (e64/m8). As with nxv1i64, RV32
; lowers the scalar (vx) forms through a stack spill + zero-stride
; vlse64.v broadcast and vmin.vv; RV64 uses vmin.vx directly.
1321 declare <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1323 define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1324 ; CHECK-LABEL: vmin_vv_nxv8i64:
1326 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1327 ; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
1329 %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1330 ret <vscale x 8 x i64> %v
1333 define <vscale x 8 x i64> @vmin_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1334 ; CHECK-LABEL: vmin_vv_nxv8i64_unmasked:
1336 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1337 ; CHECK-NEXT: vmin.vv v8, v8, v16
1339 %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1340 ret <vscale x 8 x i64> %v
1343 define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1344 ; RV32-LABEL: vmin_vx_nxv8i64:
1346 ; RV32-NEXT: addi sp, sp, -16
1347 ; RV32-NEXT: .cfi_def_cfa_offset 16
1348 ; RV32-NEXT: sw a1, 12(sp)
1349 ; RV32-NEXT: sw a0, 8(sp)
1350 ; RV32-NEXT: addi a0, sp, 8
1351 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1352 ; RV32-NEXT: vlse64.v v16, (a0), zero
1353 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1354 ; RV32-NEXT: vmin.vv v8, v8, v16, v0.t
1355 ; RV32-NEXT: addi sp, sp, 16
1358 ; RV64-LABEL: vmin_vx_nxv8i64:
1360 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1361 ; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
1363 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1364 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1365 %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1366 ret <vscale x 8 x i64> %v
1369 define <vscale x 8 x i64> @vmin_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1370 ; RV32-LABEL: vmin_vx_nxv8i64_unmasked:
1372 ; RV32-NEXT: addi sp, sp, -16
1373 ; RV32-NEXT: .cfi_def_cfa_offset 16
1374 ; RV32-NEXT: sw a1, 12(sp)
1375 ; RV32-NEXT: sw a0, 8(sp)
1376 ; RV32-NEXT: addi a0, sp, 8
1377 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1378 ; RV32-NEXT: vlse64.v v16, (a0), zero
1379 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1380 ; RV32-NEXT: vmin.vv v8, v8, v16
1381 ; RV32-NEXT: addi sp, sp, 16
1384 ; RV64-LABEL: vmin_vx_nxv8i64_unmasked:
1386 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1387 ; RV64-NEXT: vmin.vx v8, v8, a0
1389 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1390 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1391 %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1392 ret <vscale x 8 x i64> %v