; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPS
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPSEL
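
; Test that integer vector arithmetic (add, sub, mul, div, rem, plus the
; splat-immediate and fused multiply-add/sub patterns) selects the
; corresponding MSA instructions.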

define void @add_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    addv.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = add <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @add_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    addv.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = add <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @add_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    addv.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = add <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @add_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    addv.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = add <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}
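
; A splat of a constant that fits the 5-bit unsigned immediate field selects
; the immediate form (addvi here, subvi further below) instead of
; materializing the constant in a register.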

define void @add_v16i8_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v16i8_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($5)
; ALL-NEXT:    addvi.b $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  store <16 x i8> %2, ptr %c
  ret void
}

define void @add_v8i16_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v8i16_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($5)
; ALL-NEXT:    addvi.h $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
                          i16 1, i16 1, i16 1, i16 1>
  store <8 x i16> %2, ptr %c
  ret void
}

define void @add_v4i32_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v4i32_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($5)
; ALL-NEXT:    addvi.w $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %2, ptr %c
  ret void
}

define void @add_v2i64_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v2i64_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($5)
; ALL-NEXT:    addvi.d $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = add <2 x i64> %1, <i64 1, i64 1>
  store <2 x i64> %2, ptr %c
  ret void
}

define void @sub_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    subv.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = sub <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @sub_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    subv.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = sub <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @sub_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    subv.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = sub <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @sub_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    subv.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = sub <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}

define void @sub_v16i8_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v16i8_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($5)
; ALL-NEXT:    subvi.b $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  store <16 x i8> %2, ptr %c
  ret void
}
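
; In the *_negated tests, an add of a splatted -1 is still matched as a
; subtraction of 1, so subvi with immediate 1 is selected.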

define void @sub_v16i8_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v16i8_i_negated:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($5)
; ALL-NEXT:    subvi.b $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = add <16 x i8> %1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                          i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  store <16 x i8> %2, ptr %c
  ret void
}

define void @sub_v8i16_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v8i16_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($5)
; ALL-NEXT:    subvi.h $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
                          i16 1, i16 1, i16 1, i16 1>
  store <8 x i16> %2, ptr %c
  ret void
}

define void @sub_v8i16_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v8i16_i_negated:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($5)
; ALL-NEXT:    subvi.h $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = add <8 x i16> %1, <i16 -1, i16 -1, i16 -1, i16 -1,
                          i16 -1, i16 -1, i16 -1, i16 -1>
  store <8 x i16> %2, ptr %c
  ret void
}

define void @sub_v4i32_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v4i32_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($5)
; ALL-NEXT:    subvi.w $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %2, ptr %c
  ret void
}

define void @sub_v4i32_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v4i32_i_negated:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($5)
; ALL-NEXT:    subvi.w $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = add <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  store <4 x i32> %2, ptr %c
  ret void
}

define void @sub_v2i64_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v2i64_i:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($5)
; ALL-NEXT:    subvi.d $w0, $w0, 1
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = sub <2 x i64> %1, <i64 1, i64 1>
  store <2 x i64> %2, ptr %c
  ret void
}
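
; For v2i64 the add of a splatted -1 is not folded into subvi.d; the all-ones
; vector is materialized with ldi.b and added, and big-endian additionally
; emits shf.w to reorder the words of the splat.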

define void @sub_v2i64_i_negated(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: sub_v2i64_i_negated:
; MIPS:       # %bb.0:
; MIPS-NEXT:    ldi.b $w0, -1
; MIPS-NEXT:    shf.w $w0, $w0, 177
; MIPS-NEXT:    ld.d $w1, 0($5)
; MIPS-NEXT:    addv.d $w0, $w1, $w0
; MIPS-NEXT:    jr $ra
; MIPS-NEXT:    st.d $w0, 0($4)
;
; MIPSEL-LABEL: sub_v2i64_i_negated:
; MIPSEL:       # %bb.0:
; MIPSEL-NEXT:    ldi.b $w0, -1
; MIPSEL-NEXT:    ld.d $w1, 0($5)
; MIPSEL-NEXT:    addv.d $w0, $w1, $w0
; MIPSEL-NEXT:    jr $ra
; MIPSEL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = add <2 x i64> %1, <i64 -1, i64 -1>
  store <2 x i64> %2, ptr %c
  ret void
}

define void @mul_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    mulv.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = mul <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @mul_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    mulv.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = mul <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @mul_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    mulv.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = mul <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @mul_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    mulv.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = mul <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}
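
; A multiply followed by an add of the product to another operand fuses into
; maddv, which accumulates into its destination register ($w2 below).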

define void @maddv_v16i8(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($7)
; ALL-NEXT:    ld.b $w1, 0($6)
; ALL-NEXT:    ld.b $w2, 0($5)
; ALL-NEXT:    maddv.b $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = load <16 x i8>, ptr %c
  %4 = mul <16 x i8> %2, %3
  %5 = add <16 x i8> %4, %1
  store <16 x i8> %5, ptr %d
  ret void
}

define void @maddv_v8i16(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($7)
; ALL-NEXT:    ld.h $w1, 0($6)
; ALL-NEXT:    ld.h $w2, 0($5)
; ALL-NEXT:    maddv.h $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = load <8 x i16>, ptr %c
  %4 = mul <8 x i16> %2, %3
  %5 = add <8 x i16> %4, %1
  store <8 x i16> %5, ptr %d
  ret void
}

define void @maddv_v4i32(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($7)
; ALL-NEXT:    ld.w $w1, 0($6)
; ALL-NEXT:    ld.w $w2, 0($5)
; ALL-NEXT:    maddv.w $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = load <4 x i32>, ptr %c
  %4 = mul <4 x i32> %2, %3
  %5 = add <4 x i32> %4, %1
  store <4 x i32> %5, ptr %d
  ret void
}

define void @maddv_v2i64(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($7)
; ALL-NEXT:    ld.d $w1, 0($6)
; ALL-NEXT:    ld.d $w2, 0($5)
; ALL-NEXT:    maddv.d $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = load <2 x i64>, ptr %c
  %4 = mul <2 x i64> %2, %3
  %5 = add <2 x i64> %4, %1
  store <2 x i64> %5, ptr %d
  ret void
}
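
; Likewise, subtracting a product from the loaded accumulator fuses into msubv.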

define void @msubv_v16i8(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($7)
; ALL-NEXT:    ld.b $w1, 0($6)
; ALL-NEXT:    ld.b $w2, 0($5)
; ALL-NEXT:    msubv.b $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = load <16 x i8>, ptr %c
  %4 = mul <16 x i8> %2, %3
  %5 = sub <16 x i8> %1, %4
  store <16 x i8> %5, ptr %d
  ret void
}

define void @msubv_v8i16(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($7)
; ALL-NEXT:    ld.h $w1, 0($6)
; ALL-NEXT:    ld.h $w2, 0($5)
; ALL-NEXT:    msubv.h $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = load <8 x i16>, ptr %c
  %4 = mul <8 x i16> %2, %3
  %5 = sub <8 x i16> %1, %4
  store <8 x i16> %5, ptr %d
  ret void
}

define void @msubv_v4i32(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($7)
; ALL-NEXT:    ld.w $w1, 0($6)
; ALL-NEXT:    ld.w $w2, 0($5)
; ALL-NEXT:    msubv.w $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = load <4 x i32>, ptr %c
  %4 = mul <4 x i32> %2, %3
  %5 = sub <4 x i32> %1, %4
  store <4 x i32> %5, ptr %d
  ret void
}

define void @msubv_v2i64(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($7)
; ALL-NEXT:    ld.d $w1, 0($6)
; ALL-NEXT:    ld.d $w2, 0($5)
; ALL-NEXT:    msubv.d $w2, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w2, 0($4)
                         ptr %c) nounwind {
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = load <2 x i64>, ptr %c
  %4 = mul <2 x i64> %2, %3
  %5 = sub <2 x i64> %1, %4
  store <2 x i64> %5, ptr %d
  ret void
}
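
; Vector division and remainder select the native div_s/div_u and mod_s/mod_u
; instructions rather than calls to library routines.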

define void @div_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    div_s.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = sdiv <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @div_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    div_s.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = sdiv <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @div_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    div_s.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = sdiv <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @div_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    div_s.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = sdiv <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}

define void @div_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    div_u.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = udiv <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @div_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    div_u.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = udiv <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @div_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    div_u.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = udiv <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @div_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    div_u.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = udiv <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}

define void @mod_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    mod_s.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = srem <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @mod_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    mod_s.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = srem <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @mod_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    mod_s.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = srem <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @mod_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    mod_s.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = srem <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}

define void @mod_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v16i8:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.b $w0, 0($6)
; ALL-NEXT:    ld.b $w1, 0($5)
; ALL-NEXT:    mod_u.b $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($4)
  %1 = load <16 x i8>, ptr %a
  %2 = load <16 x i8>, ptr %b
  %3 = urem <16 x i8> %1, %2
  store <16 x i8> %3, ptr %c
  ret void
}

define void @mod_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v8i16:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.h $w0, 0($6)
; ALL-NEXT:    ld.h $w1, 0($5)
; ALL-NEXT:    mod_u.h $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($4)
  %1 = load <8 x i16>, ptr %a
  %2 = load <8 x i16>, ptr %b
  %3 = urem <8 x i16> %1, %2
  store <8 x i16> %3, ptr %c
  ret void
}

define void @mod_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v4i32:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.w $w0, 0($6)
; ALL-NEXT:    ld.w $w1, 0($5)
; ALL-NEXT:    mod_u.w $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x i32>, ptr %a
  %2 = load <4 x i32>, ptr %b
  %3 = urem <4 x i32> %1, %2
  store <4 x i32> %3, ptr %c
  ret void
}

define void @mod_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v2i64:
; ALL:       # %bb.0:
; ALL-NEXT:    ld.d $w0, 0($6)
; ALL-NEXT:    ld.d $w1, 0($5)
; ALL-NEXT:    mod_u.d $w0, $w1, $w0
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x i64>, ptr %a
  %2 = load <2 x i64>, ptr %b
  %3 = urem <2 x i64> %1, %2
  store <2 x i64> %3, ptr %c
  ret void
}