; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)

define <vscale x 1 x i8> @umulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: umulo_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
  ret <vscale x 1 x i8> %d
}
declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)

define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: umulo_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
  ret <vscale x 2 x i8> %d
}
declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)

define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: umulo_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
  ret <vscale x 4 x i8> %d
}
declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)

define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: umulo_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
  ret <vscale x 8 x i8> %d
}
declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)

define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: umulo_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
  ret <vscale x 16 x i8> %d
}
declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)

define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: umulo_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
  ret <vscale x 32 x i8> %d
}
declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)

define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: umulo_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
  ret <vscale x 64 x i8> %d
}
declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)

define <vscale x 1 x i16> @umulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: umulo_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
  ret <vscale x 1 x i16> %d
}
declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)

define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: umulo_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
  ret <vscale x 2 x i16> %d
}
declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)

define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: umulo_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
  ret <vscale x 4 x i16> %d
}
declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)

define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: umulo_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
  ret <vscale x 8 x i16> %d
}
declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)

define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: umulo_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
  ret <vscale x 16 x i16> %d
}
declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)

define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: umulo_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
  ret <vscale x 32 x i16> %d
}
declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)

define <vscale x 1 x i32> @umulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: umulo_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
  ret <vscale x 1 x i32> %d
}
declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)

define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: umulo_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
  ret <vscale x 2 x i32> %d
}
declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)

define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: umulo_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
  ret <vscale x 4 x i32> %d
}
declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)

define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: umulo_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
  ret <vscale x 8 x i32> %d
}
declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)

define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: umulo_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
  ret <vscale x 16 x i32> %d
}
declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)

define <vscale x 1 x i64> @umulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
; CHECK-LABEL: umulo_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
  ret <vscale x 1 x i64> %d
}
declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
; CHECK-LABEL: umulo_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
  ret <vscale x 2 x i64> %d
}
declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)

define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
; CHECK-LABEL: umulo_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
  ret <vscale x 4 x i64> %d
}
declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: umulo_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
  ret <vscale x 8 x i64> %d
}