1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 8 x i7> @llvm.vp.umax.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
9 define <vscale x 8 x i7> @vmaxu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vmaxu_vx_nxv8i7:
12 ; CHECK-NEXT: li a2, 127
13 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
14 ; CHECK-NEXT: vand.vx v8, v8, a2, v0.t
15 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
16 ; CHECK-NEXT: vmv.v.x v9, a0
17 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
18 ; CHECK-NEXT: vand.vx v9, v9, a2, v0.t
19 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
21 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
22 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
23 %v = call <vscale x 8 x i7> @llvm.vp.umax.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
24 ret <vscale x 8 x i7> %v
27 declare <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
29 define <vscale x 1 x i8> @vmaxu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
30 ; CHECK-LABEL: vmaxu_vv_nxv1i8:
32 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
33 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
35 %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
36 ret <vscale x 1 x i8> %v
39 define <vscale x 1 x i8> @vmaxu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
40 ; CHECK-LABEL: vmaxu_vv_nxv1i8_unmasked:
42 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
43 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
45 %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
46 ret <vscale x 1 x i8> %v
49 define <vscale x 1 x i8> @vmaxu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
50 ; CHECK-LABEL: vmaxu_vx_nxv1i8:
52 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
53 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
55 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
56 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
57 %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
58 ret <vscale x 1 x i8> %v
61 define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
62 ; CHECK-LABEL: vmaxu_vx_nxv1i8_commute:
64 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
65 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
67 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
68 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
69 %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
70 ret <vscale x 1 x i8> %v
73 define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
74 ; CHECK-LABEL: vmaxu_vx_nxv1i8_unmasked:
76 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
77 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
79 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
80 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
81 %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
82 ret <vscale x 1 x i8> %v
85 declare <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
87 define <vscale x 2 x i8> @vmaxu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
88 ; CHECK-LABEL: vmaxu_vv_nxv2i8:
90 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
91 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
93 %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
94 ret <vscale x 2 x i8> %v
97 define <vscale x 2 x i8> @vmaxu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
98 ; CHECK-LABEL: vmaxu_vv_nxv2i8_unmasked:
100 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
101 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
103 %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
104 ret <vscale x 2 x i8> %v
107 define <vscale x 2 x i8> @vmaxu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
108 ; CHECK-LABEL: vmaxu_vx_nxv2i8:
110 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
111 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
113 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
114 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
115 %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
116 ret <vscale x 2 x i8> %v
119 define <vscale x 2 x i8> @vmaxu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
120 ; CHECK-LABEL: vmaxu_vx_nxv2i8_unmasked:
122 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
123 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
125 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
126 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
127 %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
128 ret <vscale x 2 x i8> %v
131 declare <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
133 define <vscale x 3 x i8> @vmaxu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
134 ; CHECK-LABEL: vmaxu_vv_nxv3i8:
136 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
137 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
139 %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
140 ret <vscale x 3 x i8> %v
143 define <vscale x 3 x i8> @vmaxu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
144 ; CHECK-LABEL: vmaxu_vv_nxv3i8_unmasked:
146 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
147 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
149 %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
150 ret <vscale x 3 x i8> %v
153 define <vscale x 3 x i8> @vmaxu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
154 ; CHECK-LABEL: vmaxu_vx_nxv3i8:
156 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
157 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
159 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
160 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
161 %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
162 ret <vscale x 3 x i8> %v
165 define <vscale x 3 x i8> @vmaxu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
166 ; CHECK-LABEL: vmaxu_vx_nxv3i8_unmasked:
168 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
169 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
171 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
172 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
173 %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
174 ret <vscale x 3 x i8> %v
177 declare <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
179 define <vscale x 4 x i8> @vmaxu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
180 ; CHECK-LABEL: vmaxu_vv_nxv4i8:
182 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
183 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
185 %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
186 ret <vscale x 4 x i8> %v
189 define <vscale x 4 x i8> @vmaxu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
190 ; CHECK-LABEL: vmaxu_vv_nxv4i8_unmasked:
192 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
193 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
195 %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
196 ret <vscale x 4 x i8> %v
199 define <vscale x 4 x i8> @vmaxu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
200 ; CHECK-LABEL: vmaxu_vx_nxv4i8:
202 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
203 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
205 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
206 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
207 %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
208 ret <vscale x 4 x i8> %v
211 define <vscale x 4 x i8> @vmaxu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
212 ; CHECK-LABEL: vmaxu_vx_nxv4i8_unmasked:
214 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
215 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
217 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
218 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
219 %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
220 ret <vscale x 4 x i8> %v
223 declare <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
225 define <vscale x 8 x i8> @vmaxu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
226 ; CHECK-LABEL: vmaxu_vv_nxv8i8:
228 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
229 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
231 %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
232 ret <vscale x 8 x i8> %v
235 define <vscale x 8 x i8> @vmaxu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
236 ; CHECK-LABEL: vmaxu_vv_nxv8i8_unmasked:
238 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
239 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
241 %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
242 ret <vscale x 8 x i8> %v
245 define <vscale x 8 x i8> @vmaxu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
246 ; CHECK-LABEL: vmaxu_vx_nxv8i8:
248 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
249 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
251 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
252 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
253 %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
254 ret <vscale x 8 x i8> %v
257 define <vscale x 8 x i8> @vmaxu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
258 ; CHECK-LABEL: vmaxu_vx_nxv8i8_unmasked:
260 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
261 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
263 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
264 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
265 %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
266 ret <vscale x 8 x i8> %v
269 declare <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
271 define <vscale x 16 x i8> @vmaxu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
272 ; CHECK-LABEL: vmaxu_vv_nxv16i8:
274 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
275 ; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
277 %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
278 ret <vscale x 16 x i8> %v
281 define <vscale x 16 x i8> @vmaxu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
282 ; CHECK-LABEL: vmaxu_vv_nxv16i8_unmasked:
284 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
285 ; CHECK-NEXT: vmaxu.vv v8, v8, v10
287 %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
288 ret <vscale x 16 x i8> %v
291 define <vscale x 16 x i8> @vmaxu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
292 ; CHECK-LABEL: vmaxu_vx_nxv16i8:
294 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
295 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
297 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
298 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
299 %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
300 ret <vscale x 16 x i8> %v
303 define <vscale x 16 x i8> @vmaxu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
304 ; CHECK-LABEL: vmaxu_vx_nxv16i8_unmasked:
306 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
307 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
309 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
310 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
311 %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
312 ret <vscale x 16 x i8> %v
315 declare <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
317 define <vscale x 32 x i8> @vmaxu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
318 ; CHECK-LABEL: vmaxu_vv_nxv32i8:
320 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
321 ; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
323 %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
324 ret <vscale x 32 x i8> %v
327 define <vscale x 32 x i8> @vmaxu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
328 ; CHECK-LABEL: vmaxu_vv_nxv32i8_unmasked:
330 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
331 ; CHECK-NEXT: vmaxu.vv v8, v8, v12
333 %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
334 ret <vscale x 32 x i8> %v
337 define <vscale x 32 x i8> @vmaxu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
338 ; CHECK-LABEL: vmaxu_vx_nxv32i8:
340 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
341 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
343 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
344 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
345 %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
346 ret <vscale x 32 x i8> %v
349 define <vscale x 32 x i8> @vmaxu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
350 ; CHECK-LABEL: vmaxu_vx_nxv32i8_unmasked:
352 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
353 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
355 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
356 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
357 %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
358 ret <vscale x 32 x i8> %v
361 declare <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
363 define <vscale x 64 x i8> @vmaxu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
364 ; CHECK-LABEL: vmaxu_vv_nxv64i8:
366 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
367 ; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
369 %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
370 ret <vscale x 64 x i8> %v
373 define <vscale x 64 x i8> @vmaxu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
374 ; CHECK-LABEL: vmaxu_vv_nxv64i8_unmasked:
376 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
377 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
379 %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
380 ret <vscale x 64 x i8> %v
383 define <vscale x 64 x i8> @vmaxu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
384 ; CHECK-LABEL: vmaxu_vx_nxv64i8:
386 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
387 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
389 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
390 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
391 %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
392 ret <vscale x 64 x i8> %v
395 define <vscale x 64 x i8> @vmaxu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
396 ; CHECK-LABEL: vmaxu_vx_nxv64i8_unmasked:
398 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
399 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
401 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
402 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
403 %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
404 ret <vscale x 64 x i8> %v
407 ; Test that split-legalization works when the mask itself needs splitting.
409 declare <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
411 define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
412 ; CHECK-LABEL: vmaxu_vx_nxv128i8:
414 ; CHECK-NEXT: vmv1r.v v24, v0
415 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
416 ; CHECK-NEXT: vlm.v v0, (a1)
417 ; CHECK-NEXT: csrr a1, vlenb
418 ; CHECK-NEXT: slli a1, a1, 3
419 ; CHECK-NEXT: sub a3, a2, a1
420 ; CHECK-NEXT: sltu a4, a2, a3
421 ; CHECK-NEXT: addi a4, a4, -1
422 ; CHECK-NEXT: and a3, a4, a3
423 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
424 ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
425 ; CHECK-NEXT: bltu a2, a1, .LBB34_2
426 ; CHECK-NEXT: # %bb.1:
427 ; CHECK-NEXT: mv a2, a1
428 ; CHECK-NEXT: .LBB34_2:
429 ; CHECK-NEXT: vmv1r.v v0, v24
430 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
431 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
433 %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
434 %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
435 %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
436 ret <vscale x 128 x i8> %v
439 define <vscale x 128 x i8> @vmaxu_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i8 %b, i32 zeroext %evl) {
440 ; CHECK-LABEL: vmaxu_vx_nxv128i8_unmasked:
442 ; CHECK-NEXT: csrr a2, vlenb
443 ; CHECK-NEXT: slli a2, a2, 3
444 ; CHECK-NEXT: sub a3, a1, a2
445 ; CHECK-NEXT: sltu a4, a1, a3
446 ; CHECK-NEXT: addi a4, a4, -1
447 ; CHECK-NEXT: and a3, a4, a3
448 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
449 ; CHECK-NEXT: vmaxu.vx v16, v16, a0
450 ; CHECK-NEXT: bltu a1, a2, .LBB35_2
451 ; CHECK-NEXT: # %bb.1:
452 ; CHECK-NEXT: mv a1, a2
453 ; CHECK-NEXT: .LBB35_2:
454 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
455 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
457 %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
458 %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
459 %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
460 ret <vscale x 128 x i8> %v
463 declare <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
465 define <vscale x 1 x i16> @vmaxu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
466 ; CHECK-LABEL: vmaxu_vv_nxv1i16:
468 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
469 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
471 %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
472 ret <vscale x 1 x i16> %v
475 define <vscale x 1 x i16> @vmaxu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
476 ; CHECK-LABEL: vmaxu_vv_nxv1i16_unmasked:
478 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
479 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
481 %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
482 ret <vscale x 1 x i16> %v
485 define <vscale x 1 x i16> @vmaxu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
486 ; CHECK-LABEL: vmaxu_vx_nxv1i16:
488 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
489 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
491 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
492 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
493 %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
494 ret <vscale x 1 x i16> %v
497 define <vscale x 1 x i16> @vmaxu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
498 ; CHECK-LABEL: vmaxu_vx_nxv1i16_unmasked:
500 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
501 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
503 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
504 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
505 %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
506 ret <vscale x 1 x i16> %v
509 declare <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
511 define <vscale x 2 x i16> @vmaxu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
512 ; CHECK-LABEL: vmaxu_vv_nxv2i16:
514 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
515 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
517 %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
518 ret <vscale x 2 x i16> %v
521 define <vscale x 2 x i16> @vmaxu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
522 ; CHECK-LABEL: vmaxu_vv_nxv2i16_unmasked:
524 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
525 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
527 %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
528 ret <vscale x 2 x i16> %v
531 define <vscale x 2 x i16> @vmaxu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
532 ; CHECK-LABEL: vmaxu_vx_nxv2i16:
534 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
535 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
537 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
538 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
539 %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
540 ret <vscale x 2 x i16> %v
543 define <vscale x 2 x i16> @vmaxu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
544 ; CHECK-LABEL: vmaxu_vx_nxv2i16_unmasked:
546 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
547 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
549 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
550 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
551 %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
552 ret <vscale x 2 x i16> %v
555 declare <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
557 define <vscale x 4 x i16> @vmaxu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
558 ; CHECK-LABEL: vmaxu_vv_nxv4i16:
560 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
561 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
563 %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
564 ret <vscale x 4 x i16> %v
567 define <vscale x 4 x i16> @vmaxu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
568 ; CHECK-LABEL: vmaxu_vv_nxv4i16_unmasked:
570 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
571 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
573 %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
574 ret <vscale x 4 x i16> %v
577 define <vscale x 4 x i16> @vmaxu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
578 ; CHECK-LABEL: vmaxu_vx_nxv4i16:
580 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
581 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
583 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
584 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
585 %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
586 ret <vscale x 4 x i16> %v
589 define <vscale x 4 x i16> @vmaxu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
590 ; CHECK-LABEL: vmaxu_vx_nxv4i16_unmasked:
592 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
593 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
595 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
596 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
597 %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
598 ret <vscale x 4 x i16> %v
601 declare <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
603 define <vscale x 8 x i16> @vmaxu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
604 ; CHECK-LABEL: vmaxu_vv_nxv8i16:
606 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
607 ; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
609 %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
610 ret <vscale x 8 x i16> %v
613 define <vscale x 8 x i16> @vmaxu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
614 ; CHECK-LABEL: vmaxu_vv_nxv8i16_unmasked:
616 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
617 ; CHECK-NEXT: vmaxu.vv v8, v8, v10
619 %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
620 ret <vscale x 8 x i16> %v
623 define <vscale x 8 x i16> @vmaxu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
624 ; CHECK-LABEL: vmaxu_vx_nxv8i16:
626 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
627 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
629 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
630 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
631 %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
632 ret <vscale x 8 x i16> %v
635 define <vscale x 8 x i16> @vmaxu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
636 ; CHECK-LABEL: vmaxu_vx_nxv8i16_unmasked:
638 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
639 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
641 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
642 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
643 %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
644 ret <vscale x 8 x i16> %v
647 declare <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
649 define <vscale x 16 x i16> @vmaxu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
650 ; CHECK-LABEL: vmaxu_vv_nxv16i16:
652 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
653 ; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
655 %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
656 ret <vscale x 16 x i16> %v
659 define <vscale x 16 x i16> @vmaxu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
660 ; CHECK-LABEL: vmaxu_vv_nxv16i16_unmasked:
662 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
663 ; CHECK-NEXT: vmaxu.vv v8, v8, v12
665 %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
666 ret <vscale x 16 x i16> %v
669 define <vscale x 16 x i16> @vmaxu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
670 ; CHECK-LABEL: vmaxu_vx_nxv16i16:
672 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
673 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
675 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
676 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
677 %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
678 ret <vscale x 16 x i16> %v
681 define <vscale x 16 x i16> @vmaxu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
682 ; CHECK-LABEL: vmaxu_vx_nxv16i16_unmasked:
684 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
685 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
687 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
688 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
689 %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
690 ret <vscale x 16 x i16> %v
; vp.umax over <vscale x 32 x i16>: same four variants (vv/vx, masked/unmasked),
; now at LMUL=8 (e16, m8) since the type fills eight registers.
693 declare <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
695 define <vscale x 32 x i16> @vmaxu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
696 ; CHECK-LABEL: vmaxu_vv_nxv32i16:
698 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
699 ; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
701 %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
702 ret <vscale x 32 x i16> %v
705 define <vscale x 32 x i16> @vmaxu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
706 ; CHECK-LABEL: vmaxu_vv_nxv32i16_unmasked:
708 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
709 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
711 %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
712 ret <vscale x 32 x i16> %v
715 define <vscale x 32 x i16> @vmaxu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
716 ; CHECK-LABEL: vmaxu_vx_nxv32i16:
718 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
719 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
721 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
722 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
723 %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
724 ret <vscale x 32 x i16> %v
727 define <vscale x 32 x i16> @vmaxu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
728 ; CHECK-LABEL: vmaxu_vx_nxv32i16_unmasked:
730 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
731 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
733 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
734 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
735 %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
736 ret <vscale x 32 x i16> %v
; vp.umax over <vscale x 1 x i32>: fractional LMUL case (e32, mf2).
739 declare <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
741 define <vscale x 1 x i32> @vmaxu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
742 ; CHECK-LABEL: vmaxu_vv_nxv1i32:
744 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
745 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
747 %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
748 ret <vscale x 1 x i32> %v
751 define <vscale x 1 x i32> @vmaxu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
752 ; CHECK-LABEL: vmaxu_vv_nxv1i32_unmasked:
754 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
755 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
757 %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
758 ret <vscale x 1 x i32> %v
761 define <vscale x 1 x i32> @vmaxu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
762 ; CHECK-LABEL: vmaxu_vx_nxv1i32:
764 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
765 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
767 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
768 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
769 %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
770 ret <vscale x 1 x i32> %v
773 define <vscale x 1 x i32> @vmaxu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
774 ; CHECK-LABEL: vmaxu_vx_nxv1i32_unmasked:
776 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
777 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
779 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
780 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
781 %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
782 ret <vscale x 1 x i32> %v
; vp.umax over <vscale x 2 x i32> (e32, m1).
785 declare <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
787 define <vscale x 2 x i32> @vmaxu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
788 ; CHECK-LABEL: vmaxu_vv_nxv2i32:
790 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
791 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
793 %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
794 ret <vscale x 2 x i32> %v
797 define <vscale x 2 x i32> @vmaxu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
798 ; CHECK-LABEL: vmaxu_vv_nxv2i32_unmasked:
800 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
801 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
803 %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
804 ret <vscale x 2 x i32> %v
807 define <vscale x 2 x i32> @vmaxu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
808 ; CHECK-LABEL: vmaxu_vx_nxv2i32:
810 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
811 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
813 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
814 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
815 %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
816 ret <vscale x 2 x i32> %v
819 define <vscale x 2 x i32> @vmaxu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
820 ; CHECK-LABEL: vmaxu_vx_nxv2i32_unmasked:
822 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
823 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
825 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
826 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
827 %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
828 ret <vscale x 2 x i32> %v
; vp.umax over <vscale x 4 x i32> (e32, m2).
831 declare <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
833 define <vscale x 4 x i32> @vmaxu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
834 ; CHECK-LABEL: vmaxu_vv_nxv4i32:
836 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
837 ; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
839 %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
840 ret <vscale x 4 x i32> %v
843 define <vscale x 4 x i32> @vmaxu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
844 ; CHECK-LABEL: vmaxu_vv_nxv4i32_unmasked:
846 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
847 ; CHECK-NEXT: vmaxu.vv v8, v8, v10
849 %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
850 ret <vscale x 4 x i32> %v
853 define <vscale x 4 x i32> @vmaxu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
854 ; CHECK-LABEL: vmaxu_vx_nxv4i32:
856 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
857 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
859 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
860 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
861 %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
862 ret <vscale x 4 x i32> %v
865 define <vscale x 4 x i32> @vmaxu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
866 ; CHECK-LABEL: vmaxu_vx_nxv4i32_unmasked:
868 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
869 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
871 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
872 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
873 %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
874 ret <vscale x 4 x i32> %v
; vp.umax over <vscale x 8 x i32> (e32, m4).
877 declare <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
879 define <vscale x 8 x i32> @vmaxu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
880 ; CHECK-LABEL: vmaxu_vv_nxv8i32:
882 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
883 ; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
885 %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
886 ret <vscale x 8 x i32> %v
889 define <vscale x 8 x i32> @vmaxu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
890 ; CHECK-LABEL: vmaxu_vv_nxv8i32_unmasked:
892 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
893 ; CHECK-NEXT: vmaxu.vv v8, v8, v12
895 %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
896 ret <vscale x 8 x i32> %v
899 define <vscale x 8 x i32> @vmaxu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
900 ; CHECK-LABEL: vmaxu_vx_nxv8i32:
902 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
903 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
905 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
906 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
907 %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
908 ret <vscale x 8 x i32> %v
911 define <vscale x 8 x i32> @vmaxu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
912 ; CHECK-LABEL: vmaxu_vx_nxv8i32_unmasked:
914 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
915 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
917 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
918 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
919 %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
920 ret <vscale x 8 x i32> %v
; vp.umax over <vscale x 16 x i32> (e32, m8) — the largest legal i32 type
; before splitting is required.
923 declare <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
925 define <vscale x 16 x i32> @vmaxu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
926 ; CHECK-LABEL: vmaxu_vv_nxv16i32:
928 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
929 ; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
931 %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
932 ret <vscale x 16 x i32> %v
935 define <vscale x 16 x i32> @vmaxu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
936 ; CHECK-LABEL: vmaxu_vv_nxv16i32_unmasked:
938 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
939 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
941 %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
942 ret <vscale x 16 x i32> %v
945 define <vscale x 16 x i32> @vmaxu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
946 ; CHECK-LABEL: vmaxu_vx_nxv16i32:
948 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
949 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
951 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
952 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
953 %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
954 ret <vscale x 16 x i32> %v
957 define <vscale x 16 x i32> @vmaxu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
958 ; CHECK-LABEL: vmaxu_vx_nxv16i32_unmasked:
960 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
961 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
963 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
964 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
965 %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
966 ret <vscale x 16 x i32> %v
969 ; Test that split-legalization works when the mask needs manual splitting.
; <vscale x 32 x i32> is illegal (two m8 halves: v8-v15 and v16-v23), so the
; operation and the mask are split. The expected code below derives the
; upper-half mask by sliding v0 down vlenb/4 bits, computes the upper-half EVL
; as usubsat(evl, 2*vlenb) via the sltu/addi/and sequence, and clamps the
; lower-half EVL to at most 2*vlenb (VLMAX for e32/m8).
971 declare <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
973 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
974 ; CHECK-LABEL: vmaxu_vx_nxv32i32:
976 ; CHECK-NEXT: vmv1r.v v24, v0
977 ; CHECK-NEXT: csrr a2, vlenb
978 ; CHECK-NEXT: srli a3, a2, 2
979 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
980 ; CHECK-NEXT: vslidedown.vx v0, v0, a3
981 ; CHECK-NEXT: slli a2, a2, 1
982 ; CHECK-NEXT: sub a3, a1, a2
983 ; CHECK-NEXT: sltu a4, a1, a3
984 ; CHECK-NEXT: addi a4, a4, -1
985 ; CHECK-NEXT: and a3, a4, a3
986 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
987 ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
988 ; CHECK-NEXT: bltu a1, a2, .LBB80_2
989 ; CHECK-NEXT: # %bb.1:
990 ; CHECK-NEXT: mv a1, a2
991 ; CHECK-NEXT: .LBB80_2:
992 ; CHECK-NEXT: vmv1r.v v0, v24
993 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
994 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
996 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
997 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
998 %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
999 ret <vscale x 32 x i32> %v
; Unmasked variant of the split case: same EVL splitting arithmetic, but no
; mask register copy/slide is needed.
1002 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 %b, i32 zeroext %evl) {
1003 ; CHECK-LABEL: vmaxu_vx_nxv32i32_unmasked:
1005 ; CHECK-NEXT: csrr a2, vlenb
1006 ; CHECK-NEXT: slli a2, a2, 1
1007 ; CHECK-NEXT: sub a3, a1, a2
1008 ; CHECK-NEXT: sltu a4, a1, a3
1009 ; CHECK-NEXT: addi a4, a4, -1
1010 ; CHECK-NEXT: and a3, a4, a3
1011 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1012 ; CHECK-NEXT: vmaxu.vx v16, v16, a0
1013 ; CHECK-NEXT: bltu a1, a2, .LBB81_2
1014 ; CHECK-NEXT: # %bb.1:
1015 ; CHECK-NEXT: mv a1, a2
1016 ; CHECK-NEXT: .LBB81_2:
1017 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1018 ; CHECK-NEXT: vmaxu.vx v8, v8, a0
1020 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1021 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1022 %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
1023 ret <vscale x 32 x i32> %v
1026 ; Test splitting when the %evl is a constant (albeit an unknown one).
1028 declare i32 @llvm.vscale.i32()
1030 ; FIXME: The upper half of the operation is doing nothing.
1031 ; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
; Here %evl = vscale*8, which equals vlenb on this target, so the upper-half
; EVL (usubsat(vlenb, 2*vlenb)) is always 0 — the generic split code is still
; emitted, as the FIXMEs above note.
1033 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1034 ; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
1036 ; CHECK-NEXT: vmv1r.v v24, v0
1037 ; CHECK-NEXT: csrr a1, vlenb
1038 ; CHECK-NEXT: srli a2, a1, 2
1039 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
1040 ; CHECK-NEXT: vslidedown.vx v0, v0, a2
1041 ; CHECK-NEXT: slli a2, a1, 1
1042 ; CHECK-NEXT: sub a3, a1, a2
1043 ; CHECK-NEXT: sltu a4, a1, a3
1044 ; CHECK-NEXT: addi a4, a4, -1
1045 ; CHECK-NEXT: and a3, a4, a3
1046 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
1047 ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
1048 ; CHECK-NEXT: bltu a1, a2, .LBB82_2
1049 ; CHECK-NEXT: # %bb.1:
1050 ; CHECK-NEXT: mv a1, a2
1051 ; CHECK-NEXT: .LBB82_2:
1052 ; CHECK-NEXT: vmv1r.v v0, v24
1053 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1054 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
1056 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1057 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1058 %evl = call i32 @llvm.vscale.i32()
1059 %evl0 = mul i32 %evl, 8
1060 %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1061 ret <vscale x 32 x i32> %v
1064 ; FIXME: The first vmaxu.vx should be able to infer that its AVL is equivalent to VLMAX.
1065 ; FIXME: The upper half of the operation is doing nothing but we don't catch
1066 ; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
1067 ; (the "original" %evl is the "and", due to known-bits issues with legalizing
1068 ; the i32 %evl to i64) and this isn't detected as 0.
1069 ; This could be resolved in the future with more detailed KnownBits analysis
; Output diverges per target: RV32 folds the split away entirely, while RV64
; still emits the (dead, AVL=0) upper-half vmaxu.vx for the reason above.
1072 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1073 ; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
1075 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1076 ; RV32-NEXT: vmaxu.vx v8, v8, a0, v0.t
1079 ; RV64-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
1081 ; RV64-NEXT: csrr a1, vlenb
1082 ; RV64-NEXT: srli a1, a1, 2
1083 ; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
1084 ; RV64-NEXT: vslidedown.vx v24, v0, a1
1085 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1086 ; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
1087 ; RV64-NEXT: vmv1r.v v0, v24
1088 ; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
1089 ; RV64-NEXT: vmaxu.vx v16, v16, a0, v0.t
1091 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1092 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1093 %evl = call i32 @llvm.vscale.i32()
1094 %evl0 = mul i32 %evl, 16
1095 %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1096 ret <vscale x 32 x i32> %v
; vp.umax over <vscale x 1 x i64> (e64, m1). The .vx cases have split RV32/RV64
; checks: RV32 has no 64-bit GPR, so the i64 scalar arrives in a pair (a0/a1),
; is spilled to the stack, and is broadcast with a stride-0 vlse64 before a
; vector-vector vmaxu; RV64 uses vmaxu.vx directly.
1099 declare <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
1101 define <vscale x 1 x i64> @vmaxu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1102 ; CHECK-LABEL: vmaxu_vv_nxv1i64:
1104 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1105 ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
1107 %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
1108 ret <vscale x 1 x i64> %v
1111 define <vscale x 1 x i64> @vmaxu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
1112 ; CHECK-LABEL: vmaxu_vv_nxv1i64_unmasked:
1114 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1115 ; CHECK-NEXT: vmaxu.vv v8, v8, v9
1117 %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1118 ret <vscale x 1 x i64> %v
1121 define <vscale x 1 x i64> @vmaxu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1122 ; RV32-LABEL: vmaxu_vx_nxv1i64:
1124 ; RV32-NEXT: addi sp, sp, -16
1125 ; RV32-NEXT: .cfi_def_cfa_offset 16
1126 ; RV32-NEXT: sw a1, 12(sp)
1127 ; RV32-NEXT: sw a0, 8(sp)
1128 ; RV32-NEXT: addi a0, sp, 8
1129 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1130 ; RV32-NEXT: vlse64.v v9, (a0), zero
1131 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1132 ; RV32-NEXT: vmaxu.vv v8, v8, v9, v0.t
1133 ; RV32-NEXT: addi sp, sp, 16
1136 ; RV64-LABEL: vmaxu_vx_nxv1i64:
1138 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1139 ; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
1141 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1142 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1143 %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
1144 ret <vscale x 1 x i64> %v
1147 define <vscale x 1 x i64> @vmaxu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
1148 ; RV32-LABEL: vmaxu_vx_nxv1i64_unmasked:
1150 ; RV32-NEXT: addi sp, sp, -16
1151 ; RV32-NEXT: .cfi_def_cfa_offset 16
1152 ; RV32-NEXT: sw a1, 12(sp)
1153 ; RV32-NEXT: sw a0, 8(sp)
1154 ; RV32-NEXT: addi a0, sp, 8
1155 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1156 ; RV32-NEXT: vlse64.v v9, (a0), zero
1157 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1158 ; RV32-NEXT: vmaxu.vv v8, v8, v9
1159 ; RV32-NEXT: addi sp, sp, 16
1162 ; RV64-LABEL: vmaxu_vx_nxv1i64_unmasked:
1164 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1165 ; RV64-NEXT: vmaxu.vx v8, v8, a0
1167 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1168 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1169 %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1170 ret <vscale x 1 x i64> %v
; vp.umax over <vscale x 2 x i64> (e64, m2); RV32 .vx cases broadcast the i64
; scalar from the stack via stride-0 vlse64, as in the nxv1i64 group.
1173 declare <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
1175 define <vscale x 2 x i64> @vmaxu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1176 ; CHECK-LABEL: vmaxu_vv_nxv2i64:
1178 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1179 ; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
1181 %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
1182 ret <vscale x 2 x i64> %v
1185 define <vscale x 2 x i64> @vmaxu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
1186 ; CHECK-LABEL: vmaxu_vv_nxv2i64_unmasked:
1188 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1189 ; CHECK-NEXT: vmaxu.vv v8, v8, v10
1191 %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1192 ret <vscale x 2 x i64> %v
1195 define <vscale x 2 x i64> @vmaxu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1196 ; RV32-LABEL: vmaxu_vx_nxv2i64:
1198 ; RV32-NEXT: addi sp, sp, -16
1199 ; RV32-NEXT: .cfi_def_cfa_offset 16
1200 ; RV32-NEXT: sw a1, 12(sp)
1201 ; RV32-NEXT: sw a0, 8(sp)
1202 ; RV32-NEXT: addi a0, sp, 8
1203 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1204 ; RV32-NEXT: vlse64.v v10, (a0), zero
1205 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1206 ; RV32-NEXT: vmaxu.vv v8, v8, v10, v0.t
1207 ; RV32-NEXT: addi sp, sp, 16
1210 ; RV64-LABEL: vmaxu_vx_nxv2i64:
1212 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1213 ; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
1215 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1216 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1217 %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
1218 ret <vscale x 2 x i64> %v
1221 define <vscale x 2 x i64> @vmaxu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
1222 ; RV32-LABEL: vmaxu_vx_nxv2i64_unmasked:
1224 ; RV32-NEXT: addi sp, sp, -16
1225 ; RV32-NEXT: .cfi_def_cfa_offset 16
1226 ; RV32-NEXT: sw a1, 12(sp)
1227 ; RV32-NEXT: sw a0, 8(sp)
1228 ; RV32-NEXT: addi a0, sp, 8
1229 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1230 ; RV32-NEXT: vlse64.v v10, (a0), zero
1231 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1232 ; RV32-NEXT: vmaxu.vv v8, v8, v10
1233 ; RV32-NEXT: addi sp, sp, 16
1236 ; RV64-LABEL: vmaxu_vx_nxv2i64_unmasked:
1238 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1239 ; RV64-NEXT: vmaxu.vx v8, v8, a0
1241 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1242 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1243 %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1244 ret <vscale x 2 x i64> %v
; vp.umax over <vscale x 4 x i64> (e64, m4); RV32 .vx cases use the stack +
; stride-0 vlse64 broadcast for the 64-bit scalar.
1247 declare <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1249 define <vscale x 4 x i64> @vmaxu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1250 ; CHECK-LABEL: vmaxu_vv_nxv4i64:
1252 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1253 ; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
1255 %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1256 ret <vscale x 4 x i64> %v
1259 define <vscale x 4 x i64> @vmaxu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1260 ; CHECK-LABEL: vmaxu_vv_nxv4i64_unmasked:
1262 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1263 ; CHECK-NEXT: vmaxu.vv v8, v8, v12
1265 %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1266 ret <vscale x 4 x i64> %v
1269 define <vscale x 4 x i64> @vmaxu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1270 ; RV32-LABEL: vmaxu_vx_nxv4i64:
1272 ; RV32-NEXT: addi sp, sp, -16
1273 ; RV32-NEXT: .cfi_def_cfa_offset 16
1274 ; RV32-NEXT: sw a1, 12(sp)
1275 ; RV32-NEXT: sw a0, 8(sp)
1276 ; RV32-NEXT: addi a0, sp, 8
1277 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1278 ; RV32-NEXT: vlse64.v v12, (a0), zero
1279 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1280 ; RV32-NEXT: vmaxu.vv v8, v8, v12, v0.t
1281 ; RV32-NEXT: addi sp, sp, 16
1284 ; RV64-LABEL: vmaxu_vx_nxv4i64:
1286 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1287 ; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
1289 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1290 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1291 %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1292 ret <vscale x 4 x i64> %v
1295 define <vscale x 4 x i64> @vmaxu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1296 ; RV32-LABEL: vmaxu_vx_nxv4i64_unmasked:
1298 ; RV32-NEXT: addi sp, sp, -16
1299 ; RV32-NEXT: .cfi_def_cfa_offset 16
1300 ; RV32-NEXT: sw a1, 12(sp)
1301 ; RV32-NEXT: sw a0, 8(sp)
1302 ; RV32-NEXT: addi a0, sp, 8
1303 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1304 ; RV32-NEXT: vlse64.v v12, (a0), zero
1305 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1306 ; RV32-NEXT: vmaxu.vv v8, v8, v12
1307 ; RV32-NEXT: addi sp, sp, 16
1310 ; RV64-LABEL: vmaxu_vx_nxv4i64_unmasked:
1312 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1313 ; RV64-NEXT: vmaxu.vx v8, v8, a0
1315 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1316 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1317 %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1318 ret <vscale x 4 x i64> %v
; vp.umax over <vscale x 8 x i64> (e64, m8); RV32 .vx cases use the stack +
; stride-0 vlse64 broadcast for the 64-bit scalar.
1321 declare <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1323 define <vscale x 8 x i64> @vmaxu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1324 ; CHECK-LABEL: vmaxu_vv_nxv8i64:
1326 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1327 ; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
1329 %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1330 ret <vscale x 8 x i64> %v
1333 define <vscale x 8 x i64> @vmaxu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1334 ; CHECK-LABEL: vmaxu_vv_nxv8i64_unmasked:
1336 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1337 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
1339 %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1340 ret <vscale x 8 x i64> %v
1343 define <vscale x 8 x i64> @vmaxu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1344 ; RV32-LABEL: vmaxu_vx_nxv8i64:
1346 ; RV32-NEXT: addi sp, sp, -16
1347 ; RV32-NEXT: .cfi_def_cfa_offset 16
1348 ; RV32-NEXT: sw a1, 12(sp)
1349 ; RV32-NEXT: sw a0, 8(sp)
1350 ; RV32-NEXT: addi a0, sp, 8
1351 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1352 ; RV32-NEXT: vlse64.v v16, (a0), zero
1353 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1354 ; RV32-NEXT: vmaxu.vv v8, v8, v16, v0.t
1355 ; RV32-NEXT: addi sp, sp, 16
1358 ; RV64-LABEL: vmaxu_vx_nxv8i64:
1360 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1361 ; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
1363 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1364 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1365 %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1366 ret <vscale x 8 x i64> %v
1369 define <vscale x 8 x i64> @vmaxu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1370 ; RV32-LABEL: vmaxu_vx_nxv8i64_unmasked:
1372 ; RV32-NEXT: addi sp, sp, -16
1373 ; RV32-NEXT: .cfi_def_cfa_offset 16
1374 ; RV32-NEXT: sw a1, 12(sp)
1375 ; RV32-NEXT: sw a0, 8(sp)
1376 ; RV32-NEXT: addi a0, sp, 8
1377 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1378 ; RV32-NEXT: vlse64.v v16, (a0), zero
1379 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1380 ; RV32-NEXT: vmaxu.vv v8, v8, v16
1381 ; RV32-NEXT: addi sp, sp, 16
1384 ; RV64-LABEL: vmaxu_vx_nxv8i64_unmasked:
1386 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1387 ; RV64-NEXT: vmaxu.vx v8, v8, a0
1389 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1390 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1391 %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1392 ret <vscale x 8 x i64> %v