; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 -relocation-model=pic < %s \
; RUN: | FileCheck %s -check-prefixes=MSA,MSA32
; RUN: llc -march=mips64 -mattr=+msa,+fp64 -relocation-model=pic -target-abi n32 < %s \
; RUN: | FileCheck %s -check-prefix=MSA64N32
; RUN: llc -march=mips64 -mattr=+msa,+fp64 -relocation-model=pic -target-abi n64 < %s \
; RUN: | FileCheck %s -check-prefixes=MSA,MSA64N64

; Test that the immediate intrinsics don't crash LLVM.

; Some of the intrinsics lower to equivalent forms.
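; For example, bclri is emitted as andi with the complementary bit mask
; (see @bclri_b), and bmzi is emitted as bmnzi with the vector operands
; swapped (see @bmzi_b).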
13 define void @addvi_b(ptr %ptr) {
; MSA-LABEL: addvi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: addvi.b $w0, $w0, 25
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
21 ; MSA64N32-LABEL: addvi_b:
22 ; MSA64N32: # %bb.0: # %entry
23 ; MSA64N32-NEXT: sll $1, $4, 0
24 ; MSA64N32-NEXT: ld.b $w0, 0($1)
25 ; MSA64N32-NEXT: addvi.b $w0, $w0, 25
26 ; MSA64N32-NEXT: jr $ra
27 ; MSA64N32-NEXT: st.b $w0, 0($1)
29 %a = load <16 x i8>, ptr %ptr, align 16
30 %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 25)
31 store <16 x i8> %r, ptr %ptr, align 16
35 define void @andi_b(ptr %ptr) {
; MSA-LABEL: andi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: andi.b $w0, $w0, 25
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
43 ; MSA64N32-LABEL: andi_b:
44 ; MSA64N32: # %bb.0: # %entry
45 ; MSA64N32-NEXT: sll $1, $4, 0
46 ; MSA64N32-NEXT: ld.b $w0, 0($1)
47 ; MSA64N32-NEXT: andi.b $w0, $w0, 25
48 ; MSA64N32-NEXT: jr $ra
49 ; MSA64N32-NEXT: st.b $w0, 0($1)
51 %a = load <16 x i8>, ptr %ptr, align 16
52 %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 25)
53 store <16 x i8> %r, ptr %ptr, align 16
57 define void @bclri_b(ptr %ptr) {
; MSA-LABEL: bclri_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: andi.b $w0, $w0, 247
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
65 ; MSA64N32-LABEL: bclri_b:
66 ; MSA64N32: # %bb.0: # %entry
67 ; MSA64N32-NEXT: sll $1, $4, 0
68 ; MSA64N32-NEXT: ld.b $w0, 0($1)
69 ; MSA64N32-NEXT: andi.b $w0, $w0, 247
70 ; MSA64N32-NEXT: jr $ra
71 ; MSA64N32-NEXT: st.b $w0, 0($1)
73 %a = load <16 x i8>, ptr %ptr, align 16
74 %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 3)
75 store <16 x i8> %r, ptr %ptr, align 16
79 define void @binsli_b(ptr %ptr, ptr %ptr2) {
80 ; MSA-LABEL: binsli_b:
81 ; MSA: # %bb.0: # %entry
82 ; MSA-NEXT: ld.b $w0, 0($5)
83 ; MSA-NEXT: ld.b $w1, 0($4)
84 ; MSA-NEXT: binsli.b $w1, $w0, 3
86 ; MSA-NEXT: st.b $w1, 0($4)
88 ; MSA64N32-LABEL: binsli_b:
89 ; MSA64N32: # %bb.0: # %entry
90 ; MSA64N32-NEXT: sll $1, $4, 0
91 ; MSA64N32-NEXT: sll $2, $5, 0
92 ; MSA64N32-NEXT: ld.b $w0, 0($2)
93 ; MSA64N32-NEXT: ld.b $w1, 0($1)
94 ; MSA64N32-NEXT: binsli.b $w1, $w0, 3
95 ; MSA64N32-NEXT: jr $ra
96 ; MSA64N32-NEXT: st.b $w1, 0($1)
98 %a = load <16 x i8>, ptr %ptr, align 16
99 %b = load <16 x i8>, ptr %ptr2, align 16
100 %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 3)
101 store <16 x i8> %r, ptr %ptr, align 16
105 define void @binsri_b(ptr %ptr, ptr %ptr2) {
106 ; MSA-LABEL: binsri_b:
107 ; MSA: # %bb.0: # %entry
108 ; MSA-NEXT: ld.b $w0, 0($5)
109 ; MSA-NEXT: ld.b $w1, 0($4)
110 ; MSA-NEXT: binsri.b $w1, $w0, 5
112 ; MSA-NEXT: st.b $w1, 0($4)
114 ; MSA64N32-LABEL: binsri_b:
115 ; MSA64N32: # %bb.0: # %entry
116 ; MSA64N32-NEXT: sll $1, $4, 0
117 ; MSA64N32-NEXT: sll $2, $5, 0
118 ; MSA64N32-NEXT: ld.b $w0, 0($2)
119 ; MSA64N32-NEXT: ld.b $w1, 0($1)
120 ; MSA64N32-NEXT: binsri.b $w1, $w0, 5
121 ; MSA64N32-NEXT: jr $ra
122 ; MSA64N32-NEXT: st.b $w1, 0($1)
124 %a = load <16 x i8>, ptr %ptr, align 16
125 %b = load <16 x i8>, ptr %ptr2, align 16
126 %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %b, i32 5)
127 store <16 x i8> %r, ptr %ptr, align 16
131 define void @bmnzi_b(ptr %ptr, ptr %ptr2) {
132 ; MSA-LABEL: bmnzi_b:
133 ; MSA: # %bb.0: # %entry
134 ; MSA-NEXT: ld.b $w0, 0($5)
135 ; MSA-NEXT: ld.b $w1, 0($4)
136 ; MSA-NEXT: bmnzi.b $w1, $w0, 25
138 ; MSA-NEXT: st.b $w1, 0($4)
140 ; MSA64N32-LABEL: bmnzi_b:
141 ; MSA64N32: # %bb.0: # %entry
142 ; MSA64N32-NEXT: sll $1, $4, 0
143 ; MSA64N32-NEXT: sll $2, $5, 0
144 ; MSA64N32-NEXT: ld.b $w0, 0($2)
145 ; MSA64N32-NEXT: ld.b $w1, 0($1)
146 ; MSA64N32-NEXT: bmnzi.b $w1, $w0, 25
147 ; MSA64N32-NEXT: jr $ra
148 ; MSA64N32-NEXT: st.b $w1, 0($1)
150 %a = load <16 x i8>, ptr %ptr, align 16
151 %b = load <16 x i8>, ptr %ptr2, align 16
152 %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
153 store <16 x i8> %r, ptr %ptr, align 16
157 define void @bmzi_b(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: bmzi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: ld.b $w1, 0($5)
; MSA-NEXT: bmnzi.b $w1, $w0, 25
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w1, 0($4)
166 ; MSA64N32-LABEL: bmzi_b:
167 ; MSA64N32: # %bb.0: # %entry
168 ; MSA64N32-NEXT: sll $1, $5, 0
169 ; MSA64N32-NEXT: sll $2, $4, 0
170 ; MSA64N32-NEXT: ld.b $w0, 0($2)
171 ; MSA64N32-NEXT: ld.b $w1, 0($1)
172 ; MSA64N32-NEXT: bmnzi.b $w1, $w0, 25
173 ; MSA64N32-NEXT: jr $ra
174 ; MSA64N32-NEXT: st.b $w1, 0($2)
176 %a = load <16 x i8>, ptr %ptr, align 16
177 %b = load <16 x i8>, ptr %ptr2, align 16
178 %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
179 store <16 x i8> %r, ptr %ptr, align 16
183 define void @bnegi_b(ptr %ptr) {
184 ; MSA-LABEL: bnegi_b:
185 ; MSA: # %bb.0: # %entry
186 ; MSA-NEXT: ld.b $w0, 0($4)
187 ; MSA-NEXT: bnegi.b $w0, $w0, 6
189 ; MSA-NEXT: st.b $w0, 0($4)
191 ; MSA64N32-LABEL: bnegi_b:
192 ; MSA64N32: # %bb.0: # %entry
193 ; MSA64N32-NEXT: sll $1, $4, 0
194 ; MSA64N32-NEXT: ld.b $w0, 0($1)
195 ; MSA64N32-NEXT: bnegi.b $w0, $w0, 6
196 ; MSA64N32-NEXT: jr $ra
197 ; MSA64N32-NEXT: st.b $w0, 0($1)
199 %a = load <16 x i8>, ptr %ptr, align 16
200 %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
201 store <16 x i8> %r, ptr %ptr, align 16
205 define void @bseli_b(ptr %ptr) {
206 ; MSA-LABEL: bseli_b:
207 ; MSA: # %bb.0: # %entry
208 ; MSA-NEXT: ld.b $w0, 0($4)
209 ; MSA-NEXT: bseli.b $w0, $w0, 25
211 ; MSA-NEXT: st.b $w0, 0($4)
213 ; MSA64N32-LABEL: bseli_b:
214 ; MSA64N32: # %bb.0: # %entry
215 ; MSA64N32-NEXT: sll $1, $4, 0
216 ; MSA64N32-NEXT: ld.b $w0, 0($1)
217 ; MSA64N32-NEXT: bseli.b $w0, $w0, 25
218 ; MSA64N32-NEXT: jr $ra
219 ; MSA64N32-NEXT: st.b $w0, 0($1)
221 %a = load <16 x i8>, ptr %ptr, align 16
222 %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 25)
223 store <16 x i8> %r, ptr %ptr, align 16
227 define void @bseti_b(ptr %ptr) {
228 ; MSA-LABEL: bseti_b:
229 ; MSA: # %bb.0: # %entry
230 ; MSA-NEXT: ld.b $w0, 0($4)
231 ; MSA-NEXT: bseti.b $w0, $w0, 5
233 ; MSA-NEXT: st.b $w0, 0($4)
235 ; MSA64N32-LABEL: bseti_b:
236 ; MSA64N32: # %bb.0: # %entry
237 ; MSA64N32-NEXT: sll $1, $4, 0
238 ; MSA64N32-NEXT: ld.b $w0, 0($1)
239 ; MSA64N32-NEXT: bseti.b $w0, $w0, 5
240 ; MSA64N32-NEXT: jr $ra
241 ; MSA64N32-NEXT: st.b $w0, 0($1)
243 %a = load <16 x i8>, ptr %ptr, align 16
244 %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 5)
245 store <16 x i8> %r, ptr %ptr, align 16
249 define void @clei_s_b(ptr %ptr) {
250 ; MSA-LABEL: clei_s_b:
251 ; MSA: # %bb.0: # %entry
252 ; MSA-NEXT: ld.b $w0, 0($4)
253 ; MSA-NEXT: clei_s.b $w0, $w0, 12
255 ; MSA-NEXT: st.b $w0, 0($4)
257 ; MSA64N32-LABEL: clei_s_b:
258 ; MSA64N32: # %bb.0: # %entry
259 ; MSA64N32-NEXT: sll $1, $4, 0
260 ; MSA64N32-NEXT: ld.b $w0, 0($1)
261 ; MSA64N32-NEXT: clei_s.b $w0, $w0, 12
262 ; MSA64N32-NEXT: jr $ra
263 ; MSA64N32-NEXT: st.b $w0, 0($1)
265 %a = load <16 x i8>, ptr %ptr, align 16
266 %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 12)
267 store <16 x i8> %r, ptr %ptr, align 16
271 define void @clei_u_b(ptr %ptr) {
272 ; MSA-LABEL: clei_u_b:
273 ; MSA: # %bb.0: # %entry
274 ; MSA-NEXT: ld.b $w0, 0($4)
275 ; MSA-NEXT: clei_u.b $w0, $w0, 25
277 ; MSA-NEXT: st.b $w0, 0($4)
279 ; MSA64N32-LABEL: clei_u_b:
280 ; MSA64N32: # %bb.0: # %entry
281 ; MSA64N32-NEXT: sll $1, $4, 0
282 ; MSA64N32-NEXT: ld.b $w0, 0($1)
283 ; MSA64N32-NEXT: clei_u.b $w0, $w0, 25
284 ; MSA64N32-NEXT: jr $ra
285 ; MSA64N32-NEXT: st.b $w0, 0($1)
287 %a = load <16 x i8>, ptr %ptr, align 16
288 %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 25)
289 store <16 x i8> %r, ptr %ptr, align 16
293 define void @clti_s_b(ptr %ptr) {
294 ; MSA-LABEL: clti_s_b:
295 ; MSA: # %bb.0: # %entry
296 ; MSA-NEXT: ld.b $w0, 0($4)
297 ; MSA-NEXT: clti_s.b $w0, $w0, 15
299 ; MSA-NEXT: st.b $w0, 0($4)
301 ; MSA64N32-LABEL: clti_s_b:
302 ; MSA64N32: # %bb.0: # %entry
303 ; MSA64N32-NEXT: sll $1, $4, 0
304 ; MSA64N32-NEXT: ld.b $w0, 0($1)
305 ; MSA64N32-NEXT: clti_s.b $w0, $w0, 15
306 ; MSA64N32-NEXT: jr $ra
307 ; MSA64N32-NEXT: st.b $w0, 0($1)
309 %a = load <16 x i8>, ptr %ptr, align 16
310 %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 15)
311 store <16 x i8> %r, ptr %ptr, align 16
315 define void @clti_u_b(ptr %ptr) {
316 ; MSA-LABEL: clti_u_b:
317 ; MSA: # %bb.0: # %entry
318 ; MSA-NEXT: ld.b $w0, 0($4)
319 ; MSA-NEXT: clti_u.b $w0, $w0, 25
321 ; MSA-NEXT: st.b $w0, 0($4)
323 ; MSA64N32-LABEL: clti_u_b:
324 ; MSA64N32: # %bb.0: # %entry
325 ; MSA64N32-NEXT: sll $1, $4, 0
326 ; MSA64N32-NEXT: ld.b $w0, 0($1)
327 ; MSA64N32-NEXT: clti_u.b $w0, $w0, 25
328 ; MSA64N32-NEXT: jr $ra
329 ; MSA64N32-NEXT: st.b $w0, 0($1)
331 %a = load <16 x i8>, ptr %ptr, align 16
332 %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 25)
333 store <16 x i8> %r, ptr %ptr, align 16
337 define void @ldi_b(ptr %ptr) {
; MSA-LABEL: ldi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.b $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
344 ; MSA64N32-LABEL: ldi_b:
345 ; MSA64N32: # %bb.0: # %entry
346 ; MSA64N32-NEXT: sll $1, $4, 0
347 ; MSA64N32-NEXT: ldi.b $w0, 3
348 ; MSA64N32-NEXT: jr $ra
349 ; MSA64N32-NEXT: st.b $w0, 0($1)
351 %r = call <16 x i8> @llvm.mips.ldi.b(i32 3)
352 store <16 x i8> %r, ptr %ptr, align 16
356 define void @maxi_s_b(ptr %ptr) {
357 ; MSA-LABEL: maxi_s_b:
358 ; MSA: # %bb.0: # %entry
359 ; MSA-NEXT: ld.b $w0, 0($4)
360 ; MSA-NEXT: maxi_s.b $w0, $w0, 2
362 ; MSA-NEXT: st.b $w0, 0($4)
364 ; MSA64N32-LABEL: maxi_s_b:
365 ; MSA64N32: # %bb.0: # %entry
366 ; MSA64N32-NEXT: sll $1, $4, 0
367 ; MSA64N32-NEXT: ld.b $w0, 0($1)
368 ; MSA64N32-NEXT: maxi_s.b $w0, $w0, 2
369 ; MSA64N32-NEXT: jr $ra
370 ; MSA64N32-NEXT: st.b $w0, 0($1)
372 %a = load <16 x i8>, ptr %ptr, align 16
373 %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
374 store <16 x i8> %r, ptr %ptr, align 16
378 define void @maxi_u_b(ptr %ptr) {
379 ; MSA-LABEL: maxi_u_b:
380 ; MSA: # %bb.0: # %entry
381 ; MSA-NEXT: ld.b $w0, 0($4)
382 ; MSA-NEXT: maxi_u.b $w0, $w0, 2
384 ; MSA-NEXT: st.b $w0, 0($4)
386 ; MSA64N32-LABEL: maxi_u_b:
387 ; MSA64N32: # %bb.0: # %entry
388 ; MSA64N32-NEXT: sll $1, $4, 0
389 ; MSA64N32-NEXT: ld.b $w0, 0($1)
390 ; MSA64N32-NEXT: maxi_u.b $w0, $w0, 2
391 ; MSA64N32-NEXT: jr $ra
392 ; MSA64N32-NEXT: st.b $w0, 0($1)
394 %a = load <16 x i8>, ptr %ptr, align 16
395 %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
396 store <16 x i8> %r, ptr %ptr, align 16
400 define void @mini_s_b(ptr %ptr) {
401 ; MSA-LABEL: mini_s_b:
402 ; MSA: # %bb.0: # %entry
403 ; MSA-NEXT: ld.b $w0, 0($4)
404 ; MSA-NEXT: mini_s.b $w0, $w0, 2
406 ; MSA-NEXT: st.b $w0, 0($4)
408 ; MSA64N32-LABEL: mini_s_b:
409 ; MSA64N32: # %bb.0: # %entry
410 ; MSA64N32-NEXT: sll $1, $4, 0
411 ; MSA64N32-NEXT: ld.b $w0, 0($1)
412 ; MSA64N32-NEXT: mini_s.b $w0, $w0, 2
413 ; MSA64N32-NEXT: jr $ra
414 ; MSA64N32-NEXT: st.b $w0, 0($1)
416 %a = load <16 x i8>, ptr %ptr, align 16
417 %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
418 store <16 x i8> %r, ptr %ptr, align 16
422 define void @mini_u_b(ptr %ptr) {
423 ; MSA-LABEL: mini_u_b:
424 ; MSA: # %bb.0: # %entry
425 ; MSA-NEXT: ld.b $w0, 0($4)
426 ; MSA-NEXT: mini_u.b $w0, $w0, 2
428 ; MSA-NEXT: st.b $w0, 0($4)
430 ; MSA64N32-LABEL: mini_u_b:
431 ; MSA64N32: # %bb.0: # %entry
432 ; MSA64N32-NEXT: sll $1, $4, 0
433 ; MSA64N32-NEXT: ld.b $w0, 0($1)
434 ; MSA64N32-NEXT: mini_u.b $w0, $w0, 2
435 ; MSA64N32-NEXT: jr $ra
436 ; MSA64N32-NEXT: st.b $w0, 0($1)
438 %a = load <16 x i8>, ptr %ptr, align 16
439 %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
440 store <16 x i8> %r, ptr %ptr, align 16
444 define void @nori_b(ptr %ptr) {
; MSA-LABEL: nori_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: nori.b $w0, $w0, 25
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
452 ; MSA64N32-LABEL: nori_b:
453 ; MSA64N32: # %bb.0: # %entry
454 ; MSA64N32-NEXT: sll $1, $4, 0
455 ; MSA64N32-NEXT: ld.b $w0, 0($1)
456 ; MSA64N32-NEXT: nori.b $w0, $w0, 25
457 ; MSA64N32-NEXT: jr $ra
458 ; MSA64N32-NEXT: st.b $w0, 0($1)
460 %a = load <16 x i8>, ptr %ptr, align 16
461 %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 25)
462 store <16 x i8> %r, ptr %ptr, align 16
466 define void @ori_b(ptr %ptr) {
; MSA-LABEL: ori_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: ori.b $w0, $w0, 25
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
474 ; MSA64N32-LABEL: ori_b:
475 ; MSA64N32: # %bb.0: # %entry
476 ; MSA64N32-NEXT: sll $1, $4, 0
477 ; MSA64N32-NEXT: ld.b $w0, 0($1)
478 ; MSA64N32-NEXT: ori.b $w0, $w0, 25
479 ; MSA64N32-NEXT: jr $ra
480 ; MSA64N32-NEXT: st.b $w0, 0($1)
482 %a = load <16 x i8>, ptr %ptr, align 16
483 %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 25)
484 store <16 x i8> %r, ptr %ptr, align 16
488 define void @sldi_b(ptr %ptr) {
; MSA-LABEL: sldi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: sldi.b $w0, $w0[7]
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
496 ; MSA64N32-LABEL: sldi_b:
497 ; MSA64N32: # %bb.0: # %entry
498 ; MSA64N32-NEXT: sll $1, $4, 0
499 ; MSA64N32-NEXT: ld.b $w0, 0($1)
500 ; MSA64N32-NEXT: sldi.b $w0, $w0[7]
501 ; MSA64N32-NEXT: jr $ra
502 ; MSA64N32-NEXT: st.b $w0, 0($1)
504 %a = load <16 x i8>, ptr %ptr, align 16
505 %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
506 store <16 x i8> %r, ptr %ptr, align 16
510 define void @slli_b(ptr %ptr) {
; MSA-LABEL: slli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: slli.b $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
518 ; MSA64N32-LABEL: slli_b:
519 ; MSA64N32: # %bb.0: # %entry
520 ; MSA64N32-NEXT: sll $1, $4, 0
521 ; MSA64N32-NEXT: ld.b $w0, 0($1)
522 ; MSA64N32-NEXT: slli.b $w0, $w0, 3
523 ; MSA64N32-NEXT: jr $ra
524 ; MSA64N32-NEXT: st.b $w0, 0($1)
526 %a = load <16 x i8>, ptr %ptr, align 16
527 %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 3)
528 store <16 x i8> %r, ptr %ptr, align 16
532 define void @splati_b(ptr %ptr) {
533 ; MSA-LABEL: splati_b:
534 ; MSA: # %bb.0: # %entry
535 ; MSA-NEXT: ld.b $w0, 0($4)
536 ; MSA-NEXT: splati.b $w0, $w0[3]
538 ; MSA-NEXT: st.b $w0, 0($4)
540 ; MSA64N32-LABEL: splati_b:
541 ; MSA64N32: # %bb.0: # %entry
542 ; MSA64N32-NEXT: sll $1, $4, 0
543 ; MSA64N32-NEXT: ld.b $w0, 0($1)
544 ; MSA64N32-NEXT: splati.b $w0, $w0[3]
545 ; MSA64N32-NEXT: jr $ra
546 ; MSA64N32-NEXT: st.b $w0, 0($1)
548 %a = load <16 x i8>, ptr %ptr, align 16
549 %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 3)
550 store <16 x i8> %r, ptr %ptr, align 16
554 define void @srai_b(ptr %ptr) {
; MSA-LABEL: srai_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: srai.b $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
562 ; MSA64N32-LABEL: srai_b:
563 ; MSA64N32: # %bb.0: # %entry
564 ; MSA64N32-NEXT: sll $1, $4, 0
565 ; MSA64N32-NEXT: ld.b $w0, 0($1)
566 ; MSA64N32-NEXT: srai.b $w0, $w0, 3
567 ; MSA64N32-NEXT: jr $ra
568 ; MSA64N32-NEXT: st.b $w0, 0($1)
570 %a = load <16 x i8>, ptr %ptr, align 16
571 %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 3)
572 store <16 x i8> %r, ptr %ptr, align 16
576 define void @srari_b(ptr %ptr) {
577 ; MSA-LABEL: srari_b:
578 ; MSA: # %bb.0: # %entry
579 ; MSA-NEXT: ld.b $w0, 0($4)
580 ; MSA-NEXT: srari.b $w0, $w0, 3
582 ; MSA-NEXT: st.b $w0, 0($4)
584 ; MSA64N32-LABEL: srari_b:
585 ; MSA64N32: # %bb.0: # %entry
586 ; MSA64N32-NEXT: sll $1, $4, 0
587 ; MSA64N32-NEXT: ld.b $w0, 0($1)
588 ; MSA64N32-NEXT: srari.b $w0, $w0, 3
589 ; MSA64N32-NEXT: jr $ra
590 ; MSA64N32-NEXT: st.b $w0, 0($1)
592 %a = load <16 x i8>, ptr %ptr, align 16
593 %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 3)
594 store <16 x i8> %r, ptr %ptr, align 16
598 define void @srli_b(ptr %ptr) {
; MSA-LABEL: srli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA-NEXT: srli.b $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.b $w0, 0($4)
606 ; MSA64N32-LABEL: srli_b:
607 ; MSA64N32: # %bb.0: # %entry
608 ; MSA64N32-NEXT: sll $1, $4, 0
609 ; MSA64N32-NEXT: ld.b $w0, 0($1)
610 ; MSA64N32-NEXT: srli.b $w0, $w0, 3
611 ; MSA64N32-NEXT: jr $ra
612 ; MSA64N32-NEXT: st.b $w0, 0($1)
614 %a = load <16 x i8>, ptr %ptr, align 16
615 %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 3)
616 store <16 x i8> %r, ptr %ptr, align 16
620 define void @srlri_b(ptr %ptr) {
621 ; MSA-LABEL: srlri_b:
622 ; MSA: # %bb.0: # %entry
623 ; MSA-NEXT: ld.b $w0, 0($4)
624 ; MSA-NEXT: srlri.b $w0, $w0, 3
626 ; MSA-NEXT: st.b $w0, 0($4)
628 ; MSA64N32-LABEL: srlri_b:
629 ; MSA64N32: # %bb.0: # %entry
630 ; MSA64N32-NEXT: sll $1, $4, 0
631 ; MSA64N32-NEXT: ld.b $w0, 0($1)
632 ; MSA64N32-NEXT: srlri.b $w0, $w0, 3
633 ; MSA64N32-NEXT: jr $ra
634 ; MSA64N32-NEXT: st.b $w0, 0($1)
636 %a = load <16 x i8>, ptr %ptr, align 16
637 %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 3)
638 store <16 x i8> %r, ptr %ptr, align 16
642 define void @addvi_w(ptr %ptr) {
643 ; MSA-LABEL: addvi_w:
644 ; MSA: # %bb.0: # %entry
645 ; MSA-NEXT: ld.w $w0, 0($4)
646 ; MSA-NEXT: addvi.w $w0, $w0, 25
648 ; MSA-NEXT: st.w $w0, 0($4)
650 ; MSA64N32-LABEL: addvi_w:
651 ; MSA64N32: # %bb.0: # %entry
652 ; MSA64N32-NEXT: sll $1, $4, 0
653 ; MSA64N32-NEXT: ld.w $w0, 0($1)
654 ; MSA64N32-NEXT: addvi.w $w0, $w0, 25
655 ; MSA64N32-NEXT: jr $ra
656 ; MSA64N32-NEXT: st.w $w0, 0($1)
658 %a = load <4 x i32>, ptr %ptr, align 16
659 %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 25)
660 store <4 x i32> %r, ptr %ptr, align 16
664 define void @bclri_w(ptr %ptr) {
665 ; MSA-LABEL: bclri_w:
666 ; MSA: # %bb.0: # %entry
667 ; MSA-NEXT: ld.w $w0, 0($4)
668 ; MSA-NEXT: bclri.w $w0, $w0, 25
670 ; MSA-NEXT: st.w $w0, 0($4)
672 ; MSA64N32-LABEL: bclri_w:
673 ; MSA64N32: # %bb.0: # %entry
674 ; MSA64N32-NEXT: sll $1, $4, 0
675 ; MSA64N32-NEXT: ld.w $w0, 0($1)
676 ; MSA64N32-NEXT: bclri.w $w0, $w0, 25
677 ; MSA64N32-NEXT: jr $ra
678 ; MSA64N32-NEXT: st.w $w0, 0($1)
680 %a = load <4 x i32>, ptr %ptr, align 16
681 %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 25)
682 store <4 x i32> %r, ptr %ptr, align 16
686 define void @binsli_w(ptr %ptr, ptr %ptr2) {
687 ; MSA-LABEL: binsli_w:
688 ; MSA: # %bb.0: # %entry
689 ; MSA-NEXT: ld.w $w0, 0($5)
690 ; MSA-NEXT: ld.w $w1, 0($4)
691 ; MSA-NEXT: binsli.w $w1, $w0, 25
693 ; MSA-NEXT: st.w $w1, 0($4)
695 ; MSA64N32-LABEL: binsli_w:
696 ; MSA64N32: # %bb.0: # %entry
697 ; MSA64N32-NEXT: sll $1, $4, 0
698 ; MSA64N32-NEXT: sll $2, $5, 0
699 ; MSA64N32-NEXT: ld.w $w0, 0($2)
700 ; MSA64N32-NEXT: ld.w $w1, 0($1)
701 ; MSA64N32-NEXT: binsli.w $w1, $w0, 25
702 ; MSA64N32-NEXT: jr $ra
703 ; MSA64N32-NEXT: st.w $w1, 0($1)
705 %a = load <4 x i32>, ptr %ptr, align 16
706 %b = load <4 x i32>, ptr %ptr2, align 16
707 %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %b, i32 25)
708 store <4 x i32> %r, ptr %ptr, align 16
712 define void @binsri_w(ptr %ptr, ptr %ptr2) {
713 ; MSA-LABEL: binsri_w:
714 ; MSA: # %bb.0: # %entry
715 ; MSA-NEXT: ld.w $w0, 0($5)
716 ; MSA-NEXT: ld.w $w1, 0($4)
717 ; MSA-NEXT: binsri.w $w1, $w0, 25
719 ; MSA-NEXT: st.w $w1, 0($4)
721 ; MSA64N32-LABEL: binsri_w:
722 ; MSA64N32: # %bb.0: # %entry
723 ; MSA64N32-NEXT: sll $1, $4, 0
724 ; MSA64N32-NEXT: sll $2, $5, 0
725 ; MSA64N32-NEXT: ld.w $w0, 0($2)
726 ; MSA64N32-NEXT: ld.w $w1, 0($1)
727 ; MSA64N32-NEXT: binsri.w $w1, $w0, 25
728 ; MSA64N32-NEXT: jr $ra
729 ; MSA64N32-NEXT: st.w $w1, 0($1)
731 %a = load <4 x i32>, ptr %ptr, align 16
732 %b = load <4 x i32>, ptr %ptr2, align 16
733 %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %b, i32 25)
734 store <4 x i32> %r, ptr %ptr, align 16
738 define void @bnegi_w(ptr %ptr) {
739 ; MSA-LABEL: bnegi_w:
740 ; MSA: # %bb.0: # %entry
741 ; MSA-NEXT: ld.w $w0, 0($4)
742 ; MSA-NEXT: bnegi.w $w0, $w0, 25
744 ; MSA-NEXT: st.w $w0, 0($4)
746 ; MSA64N32-LABEL: bnegi_w:
747 ; MSA64N32: # %bb.0: # %entry
748 ; MSA64N32-NEXT: sll $1, $4, 0
749 ; MSA64N32-NEXT: ld.w $w0, 0($1)
750 ; MSA64N32-NEXT: bnegi.w $w0, $w0, 25
751 ; MSA64N32-NEXT: jr $ra
752 ; MSA64N32-NEXT: st.w $w0, 0($1)
754 %a = load <4 x i32>, ptr %ptr, align 16
755 %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 25)
756 store <4 x i32> %r, ptr %ptr, align 16
760 define void @bseti_w(ptr %ptr) {
761 ; MSA-LABEL: bseti_w:
762 ; MSA: # %bb.0: # %entry
763 ; MSA-NEXT: ld.w $w0, 0($4)
764 ; MSA-NEXT: bseti.w $w0, $w0, 25
766 ; MSA-NEXT: st.w $w0, 0($4)
768 ; MSA64N32-LABEL: bseti_w:
769 ; MSA64N32: # %bb.0: # %entry
770 ; MSA64N32-NEXT: sll $1, $4, 0
771 ; MSA64N32-NEXT: ld.w $w0, 0($1)
772 ; MSA64N32-NEXT: bseti.w $w0, $w0, 25
773 ; MSA64N32-NEXT: jr $ra
774 ; MSA64N32-NEXT: st.w $w0, 0($1)
776 %a = load <4 x i32>, ptr %ptr, align 16
777 %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 25)
778 store <4 x i32> %r, ptr %ptr, align 16
782 define void @clei_s_w(ptr %ptr) {
783 ; MSA-LABEL: clei_s_w:
784 ; MSA: # %bb.0: # %entry
785 ; MSA-NEXT: ld.w $w0, 0($4)
786 ; MSA-NEXT: clei_s.w $w0, $w0, 14
788 ; MSA-NEXT: st.w $w0, 0($4)
790 ; MSA64N32-LABEL: clei_s_w:
791 ; MSA64N32: # %bb.0: # %entry
792 ; MSA64N32-NEXT: sll $1, $4, 0
793 ; MSA64N32-NEXT: ld.w $w0, 0($1)
794 ; MSA64N32-NEXT: clei_s.w $w0, $w0, 14
795 ; MSA64N32-NEXT: jr $ra
796 ; MSA64N32-NEXT: st.w $w0, 0($1)
798 %a = load <4 x i32>, ptr %ptr, align 16
799 %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 14)
800 store <4 x i32> %r, ptr %ptr, align 16
804 define void @clei_u_w(ptr %ptr) {
805 ; MSA-LABEL: clei_u_w:
806 ; MSA: # %bb.0: # %entry
807 ; MSA-NEXT: ld.w $w0, 0($4)
808 ; MSA-NEXT: clei_u.w $w0, $w0, 25
810 ; MSA-NEXT: st.w $w0, 0($4)
812 ; MSA64N32-LABEL: clei_u_w:
813 ; MSA64N32: # %bb.0: # %entry
814 ; MSA64N32-NEXT: sll $1, $4, 0
815 ; MSA64N32-NEXT: ld.w $w0, 0($1)
816 ; MSA64N32-NEXT: clei_u.w $w0, $w0, 25
817 ; MSA64N32-NEXT: jr $ra
818 ; MSA64N32-NEXT: st.w $w0, 0($1)
820 %a = load <4 x i32>, ptr %ptr, align 16
821 %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 25)
822 store <4 x i32> %r, ptr %ptr, align 16
826 define void @clti_s_w(ptr %ptr) {
827 ; MSA-LABEL: clti_s_w:
828 ; MSA: # %bb.0: # %entry
829 ; MSA-NEXT: ld.w $w0, 0($4)
830 ; MSA-NEXT: clti_s.w $w0, $w0, 15
832 ; MSA-NEXT: st.w $w0, 0($4)
834 ; MSA64N32-LABEL: clti_s_w:
835 ; MSA64N32: # %bb.0: # %entry
836 ; MSA64N32-NEXT: sll $1, $4, 0
837 ; MSA64N32-NEXT: ld.w $w0, 0($1)
838 ; MSA64N32-NEXT: clti_s.w $w0, $w0, 15
839 ; MSA64N32-NEXT: jr $ra
840 ; MSA64N32-NEXT: st.w $w0, 0($1)
842 %a = load <4 x i32>, ptr %ptr, align 16
843 %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 15)
844 store <4 x i32> %r, ptr %ptr, align 16
848 define void @clti_u_w(ptr %ptr) {
849 ; MSA-LABEL: clti_u_w:
850 ; MSA: # %bb.0: # %entry
851 ; MSA-NEXT: ld.w $w0, 0($4)
852 ; MSA-NEXT: clti_u.w $w0, $w0, 25
854 ; MSA-NEXT: st.w $w0, 0($4)
856 ; MSA64N32-LABEL: clti_u_w:
857 ; MSA64N32: # %bb.0: # %entry
858 ; MSA64N32-NEXT: sll $1, $4, 0
859 ; MSA64N32-NEXT: ld.w $w0, 0($1)
860 ; MSA64N32-NEXT: clti_u.w $w0, $w0, 25
861 ; MSA64N32-NEXT: jr $ra
862 ; MSA64N32-NEXT: st.w $w0, 0($1)
864 %a = load <4 x i32>, ptr %ptr, align 16
865 %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 25)
866 store <4 x i32> %r, ptr %ptr, align 16
870 define void @maxi_s_w(ptr %ptr) {
871 ; MSA-LABEL: maxi_s_w:
872 ; MSA: # %bb.0: # %entry
873 ; MSA-NEXT: ld.w $w0, 0($4)
874 ; MSA-NEXT: maxi_s.w $w0, $w0, 2
876 ; MSA-NEXT: st.w $w0, 0($4)
878 ; MSA64N32-LABEL: maxi_s_w:
879 ; MSA64N32: # %bb.0: # %entry
880 ; MSA64N32-NEXT: sll $1, $4, 0
881 ; MSA64N32-NEXT: ld.w $w0, 0($1)
882 ; MSA64N32-NEXT: maxi_s.w $w0, $w0, 2
883 ; MSA64N32-NEXT: jr $ra
884 ; MSA64N32-NEXT: st.w $w0, 0($1)
886 %a = load <4 x i32>, ptr %ptr, align 16
887 %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 2)
888 store <4 x i32> %r, ptr %ptr, align 16
892 define void @maxi_u_w(ptr %ptr) {
893 ; MSA-LABEL: maxi_u_w:
894 ; MSA: # %bb.0: # %entry
895 ; MSA-NEXT: ld.w $w0, 0($4)
896 ; MSA-NEXT: maxi_u.w $w0, $w0, 2
898 ; MSA-NEXT: st.w $w0, 0($4)
900 ; MSA64N32-LABEL: maxi_u_w:
901 ; MSA64N32: # %bb.0: # %entry
902 ; MSA64N32-NEXT: sll $1, $4, 0
903 ; MSA64N32-NEXT: ld.w $w0, 0($1)
904 ; MSA64N32-NEXT: maxi_u.w $w0, $w0, 2
905 ; MSA64N32-NEXT: jr $ra
906 ; MSA64N32-NEXT: st.w $w0, 0($1)
908 %a = load <4 x i32>, ptr %ptr, align 16
909 %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 2)
910 store <4 x i32> %r, ptr %ptr, align 16
914 define void @mini_s_w(ptr %ptr) {
915 ; MSA-LABEL: mini_s_w:
916 ; MSA: # %bb.0: # %entry
917 ; MSA-NEXT: ld.w $w0, 0($4)
918 ; MSA-NEXT: mini_s.w $w0, $w0, 2
920 ; MSA-NEXT: st.w $w0, 0($4)
922 ; MSA64N32-LABEL: mini_s_w:
923 ; MSA64N32: # %bb.0: # %entry
924 ; MSA64N32-NEXT: sll $1, $4, 0
925 ; MSA64N32-NEXT: ld.w $w0, 0($1)
926 ; MSA64N32-NEXT: mini_s.w $w0, $w0, 2
927 ; MSA64N32-NEXT: jr $ra
928 ; MSA64N32-NEXT: st.w $w0, 0($1)
930 %a = load <4 x i32>, ptr %ptr, align 16
931 %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 2)
932 store <4 x i32> %r, ptr %ptr, align 16
936 define void @mini_u_w(ptr %ptr) {
937 ; MSA-LABEL: mini_u_w:
938 ; MSA: # %bb.0: # %entry
939 ; MSA-NEXT: ld.w $w0, 0($4)
940 ; MSA-NEXT: mini_u.w $w0, $w0, 2
942 ; MSA-NEXT: st.w $w0, 0($4)
944 ; MSA64N32-LABEL: mini_u_w:
945 ; MSA64N32: # %bb.0: # %entry
946 ; MSA64N32-NEXT: sll $1, $4, 0
947 ; MSA64N32-NEXT: ld.w $w0, 0($1)
948 ; MSA64N32-NEXT: mini_u.w $w0, $w0, 2
949 ; MSA64N32-NEXT: jr $ra
950 ; MSA64N32-NEXT: st.w $w0, 0($1)
952 %a = load <4 x i32>, ptr %ptr, align 16
953 %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 2)
954 store <4 x i32> %r, ptr %ptr, align 16
958 define void @ldi_w(ptr %ptr) {
; MSA-LABEL: ldi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.w $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.w $w0, 0($4)
965 ; MSA64N32-LABEL: ldi_w:
966 ; MSA64N32: # %bb.0: # %entry
967 ; MSA64N32-NEXT: sll $1, $4, 0
968 ; MSA64N32-NEXT: ldi.w $w0, 3
969 ; MSA64N32-NEXT: jr $ra
970 ; MSA64N32-NEXT: st.w $w0, 0($1)
972 %r = call <4 x i32> @llvm.mips.ldi.w(i32 3)
973 store <4 x i32> %r, ptr %ptr, align 16
977 define void @sldi_w(ptr %ptr) {
; MSA-LABEL: sldi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA-NEXT: sldi.w $w0, $w0[2]
; MSA-NEXT: jr $ra
; MSA-NEXT: st.w $w0, 0($4)
985 ; MSA64N32-LABEL: sldi_w:
986 ; MSA64N32: # %bb.0: # %entry
987 ; MSA64N32-NEXT: sll $1, $4, 0
988 ; MSA64N32-NEXT: ld.w $w0, 0($1)
989 ; MSA64N32-NEXT: sldi.w $w0, $w0[2]
990 ; MSA64N32-NEXT: jr $ra
991 ; MSA64N32-NEXT: st.w $w0, 0($1)
993 %a = load <4 x i32>, ptr %ptr, align 16
994 %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 2)
995 store <4 x i32> %r, ptr %ptr, align 16
999 define void @slli_w(ptr %ptr) {
1000 ; MSA-LABEL: slli_w:
1001 ; MSA: # %bb.0: # %entry
1002 ; MSA-NEXT: ld.w $w0, 0($4)
1003 ; MSA-NEXT: slli.w $w0, $w0, 3
1005 ; MSA-NEXT: st.w $w0, 0($4)
1007 ; MSA64N32-LABEL: slli_w:
1008 ; MSA64N32: # %bb.0: # %entry
1009 ; MSA64N32-NEXT: sll $1, $4, 0
1010 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1011 ; MSA64N32-NEXT: slli.w $w0, $w0, 3
1012 ; MSA64N32-NEXT: jr $ra
1013 ; MSA64N32-NEXT: st.w $w0, 0($1)
1015 %a = load <4 x i32>, ptr %ptr, align 16
1016 %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 3)
1017 store <4 x i32> %r, ptr %ptr, align 16
1021 define void @splati_w(ptr %ptr) {
1022 ; MSA-LABEL: splati_w:
1023 ; MSA: # %bb.0: # %entry
1024 ; MSA-NEXT: ld.w $w0, 0($4)
1025 ; MSA-NEXT: splati.w $w0, $w0[3]
1027 ; MSA-NEXT: st.w $w0, 0($4)
1029 ; MSA64N32-LABEL: splati_w:
1030 ; MSA64N32: # %bb.0: # %entry
1031 ; MSA64N32-NEXT: sll $1, $4, 0
1032 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1033 ; MSA64N32-NEXT: splati.w $w0, $w0[3]
1034 ; MSA64N32-NEXT: jr $ra
1035 ; MSA64N32-NEXT: st.w $w0, 0($1)
1037 %a = load <4 x i32>, ptr %ptr, align 16
1038 %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 3)
1039 store <4 x i32> %r, ptr %ptr, align 16
1043 define void @srai_w(ptr %ptr) {
1044 ; MSA-LABEL: srai_w:
1045 ; MSA: # %bb.0: # %entry
1046 ; MSA-NEXT: ld.w $w0, 0($4)
1047 ; MSA-NEXT: srai.w $w0, $w0, 3
1049 ; MSA-NEXT: st.w $w0, 0($4)
1051 ; MSA64N32-LABEL: srai_w:
1052 ; MSA64N32: # %bb.0: # %entry
1053 ; MSA64N32-NEXT: sll $1, $4, 0
1054 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1055 ; MSA64N32-NEXT: srai.w $w0, $w0, 3
1056 ; MSA64N32-NEXT: jr $ra
1057 ; MSA64N32-NEXT: st.w $w0, 0($1)
1059 %a = load <4 x i32>, ptr %ptr, align 16
1060 %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 3)
1061 store <4 x i32> %r, ptr %ptr, align 16
1065 define void @srari_w(ptr %ptr) {
1066 ; MSA-LABEL: srari_w:
1067 ; MSA: # %bb.0: # %entry
1068 ; MSA-NEXT: ld.w $w0, 0($4)
1069 ; MSA-NEXT: srari.w $w0, $w0, 3
1071 ; MSA-NEXT: st.w $w0, 0($4)
1073 ; MSA64N32-LABEL: srari_w:
1074 ; MSA64N32: # %bb.0: # %entry
1075 ; MSA64N32-NEXT: sll $1, $4, 0
1076 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1077 ; MSA64N32-NEXT: srari.w $w0, $w0, 3
1078 ; MSA64N32-NEXT: jr $ra
1079 ; MSA64N32-NEXT: st.w $w0, 0($1)
1081 %a = load <4 x i32>, ptr %ptr, align 16
1082 %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 3)
1083 store <4 x i32> %r, ptr %ptr, align 16
1087 define void @srli_w(ptr %ptr) {
1088 ; MSA-LABEL: srli_w:
1089 ; MSA: # %bb.0: # %entry
1090 ; MSA-NEXT: ld.w $w0, 0($4)
1091 ; MSA-NEXT: srli.w $w0, $w0, 3
1093 ; MSA-NEXT: st.w $w0, 0($4)
1095 ; MSA64N32-LABEL: srli_w:
1096 ; MSA64N32: # %bb.0: # %entry
1097 ; MSA64N32-NEXT: sll $1, $4, 0
1098 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1099 ; MSA64N32-NEXT: srli.w $w0, $w0, 3
1100 ; MSA64N32-NEXT: jr $ra
1101 ; MSA64N32-NEXT: st.w $w0, 0($1)
1103 %a = load <4 x i32>, ptr %ptr, align 16
1104 %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 3)
1105 store <4 x i32> %r, ptr %ptr, align 16
1109 define void @srlri_w(ptr %ptr) {
1110 ; MSA-LABEL: srlri_w:
1111 ; MSA: # %bb.0: # %entry
1112 ; MSA-NEXT: ld.w $w0, 0($4)
1113 ; MSA-NEXT: srlri.w $w0, $w0, 3
1115 ; MSA-NEXT: st.w $w0, 0($4)
1117 ; MSA64N32-LABEL: srlri_w:
1118 ; MSA64N32: # %bb.0: # %entry
1119 ; MSA64N32-NEXT: sll $1, $4, 0
1120 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1121 ; MSA64N32-NEXT: srlri.w $w0, $w0, 3
1122 ; MSA64N32-NEXT: jr $ra
1123 ; MSA64N32-NEXT: st.w $w0, 0($1)
1125 %a = load <4 x i32>, ptr %ptr, align 16
1126 %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 3)
1127 store <4 x i32> %r, ptr %ptr, align 16
1131 define void @addvi_h(ptr %ptr) {
1132 ; MSA-LABEL: addvi_h:
1133 ; MSA: # %bb.0: # %entry
1134 ; MSA-NEXT: ld.h $w0, 0($4)
1135 ; MSA-NEXT: addvi.h $w0, $w0, 25
1137 ; MSA-NEXT: st.h $w0, 0($4)
1139 ; MSA64N32-LABEL: addvi_h:
1140 ; MSA64N32: # %bb.0: # %entry
1141 ; MSA64N32-NEXT: sll $1, $4, 0
1142 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1143 ; MSA64N32-NEXT: addvi.h $w0, $w0, 25
1144 ; MSA64N32-NEXT: jr $ra
1145 ; MSA64N32-NEXT: st.h $w0, 0($1)
1147 %a = load <8 x i16>, ptr %ptr, align 16
1148 %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 25)
1149 store <8 x i16> %r, ptr %ptr, align 16
1153 define void @bclri_h(ptr %ptr) {
1154 ; MSA-LABEL: bclri_h:
1155 ; MSA: # %bb.0: # %entry
1156 ; MSA-NEXT: ld.h $w0, 0($4)
1157 ; MSA-NEXT: bclri.h $w0, $w0, 8
1159 ; MSA-NEXT: st.h $w0, 0($4)
1161 ; MSA64N32-LABEL: bclri_h:
1162 ; MSA64N32: # %bb.0: # %entry
1163 ; MSA64N32-NEXT: sll $1, $4, 0
1164 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1165 ; MSA64N32-NEXT: bclri.h $w0, $w0, 8
1166 ; MSA64N32-NEXT: jr $ra
1167 ; MSA64N32-NEXT: st.h $w0, 0($1)
1169 %a = load <8 x i16>, ptr %ptr, align 16
1170 %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 8)
1171 store <8 x i16> %r, ptr %ptr, align 16
1175 define void @binsli_h(ptr %ptr, ptr %ptr2) {
1176 ; MSA-LABEL: binsli_h:
1177 ; MSA: # %bb.0: # %entry
1178 ; MSA-NEXT: ld.h $w0, 0($5)
1179 ; MSA-NEXT: ld.h $w1, 0($4)
1180 ; MSA-NEXT: binsli.h $w1, $w0, 8
1182 ; MSA-NEXT: st.h $w1, 0($4)
1184 ; MSA64N32-LABEL: binsli_h:
1185 ; MSA64N32: # %bb.0: # %entry
1186 ; MSA64N32-NEXT: sll $1, $4, 0
1187 ; MSA64N32-NEXT: sll $2, $5, 0
1188 ; MSA64N32-NEXT: ld.h $w0, 0($2)
1189 ; MSA64N32-NEXT: ld.h $w1, 0($1)
1190 ; MSA64N32-NEXT: binsli.h $w1, $w0, 8
1191 ; MSA64N32-NEXT: jr $ra
1192 ; MSA64N32-NEXT: st.h $w1, 0($1)
1194 %a = load <8 x i16>, ptr %ptr, align 16
1195 %b = load <8 x i16>, ptr %ptr2, align 16
1196 %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %b, i32 8)
1197 store <8 x i16> %r, ptr %ptr, align 16
1201 define void @binsri_h(ptr %ptr, ptr %ptr2) {
1202 ; MSA-LABEL: binsri_h:
1203 ; MSA: # %bb.0: # %entry
1204 ; MSA-NEXT: ld.h $w0, 0($5)
1205 ; MSA-NEXT: ld.h $w1, 0($4)
1206 ; MSA-NEXT: binsri.h $w1, $w0, 14
1208 ; MSA-NEXT: st.h $w1, 0($4)
1210 ; MSA64N32-LABEL: binsri_h:
1211 ; MSA64N32: # %bb.0: # %entry
1212 ; MSA64N32-NEXT: sll $1, $4, 0
1213 ; MSA64N32-NEXT: sll $2, $5, 0
1214 ; MSA64N32-NEXT: ld.h $w0, 0($2)
1215 ; MSA64N32-NEXT: ld.h $w1, 0($1)
1216 ; MSA64N32-NEXT: binsri.h $w1, $w0, 14
1217 ; MSA64N32-NEXT: jr $ra
1218 ; MSA64N32-NEXT: st.h $w1, 0($1)
1220 %a = load <8 x i16>, ptr %ptr, align 16
1221 %b = load <8 x i16>, ptr %ptr2, align 16
1222 %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 14)
1223 store <8 x i16> %r, ptr %ptr, align 16
1227 define void @bnegi_h(ptr %ptr) {
1228 ; MSA-LABEL: bnegi_h:
1229 ; MSA: # %bb.0: # %entry
1230 ; MSA-NEXT: ld.h $w0, 0($4)
1231 ; MSA-NEXT: bnegi.h $w0, $w0, 14
1233 ; MSA-NEXT: st.h $w0, 0($4)
1235 ; MSA64N32-LABEL: bnegi_h:
1236 ; MSA64N32: # %bb.0: # %entry
1237 ; MSA64N32-NEXT: sll $1, $4, 0
1238 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1239 ; MSA64N32-NEXT: bnegi.h $w0, $w0, 14
1240 ; MSA64N32-NEXT: jr $ra
1241 ; MSA64N32-NEXT: st.h $w0, 0($1)
1243 %a = load <8 x i16>, ptr %ptr, align 16
1244 %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 14)
1245 store <8 x i16> %r, ptr %ptr, align 16
1249 define void @bseti_h(ptr %ptr) {
1250 ; MSA-LABEL: bseti_h:
1251 ; MSA: # %bb.0: # %entry
1252 ; MSA-NEXT: ld.h $w0, 0($4)
1253 ; MSA-NEXT: bseti.h $w0, $w0, 15
1255 ; MSA-NEXT: st.h $w0, 0($4)
1257 ; MSA64N32-LABEL: bseti_h:
1258 ; MSA64N32: # %bb.0: # %entry
1259 ; MSA64N32-NEXT: sll $1, $4, 0
1260 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1261 ; MSA64N32-NEXT: bseti.h $w0, $w0, 15
1262 ; MSA64N32-NEXT: jr $ra
1263 ; MSA64N32-NEXT: st.h $w0, 0($1)
1265 %a = load <8 x i16>, ptr %ptr, align 16
1266 %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 15)
1267 store <8 x i16> %r, ptr %ptr, align 16
1271 define void @clei_s_h(ptr %ptr) {
1272 ; MSA-LABEL: clei_s_h:
1273 ; MSA: # %bb.0: # %entry
1274 ; MSA-NEXT: ld.h $w0, 0($4)
1275 ; MSA-NEXT: clei_s.h $w0, $w0, 13
1277 ; MSA-NEXT: st.h $w0, 0($4)
1279 ; MSA64N32-LABEL: clei_s_h:
1280 ; MSA64N32: # %bb.0: # %entry
1281 ; MSA64N32-NEXT: sll $1, $4, 0
1282 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1283 ; MSA64N32-NEXT: clei_s.h $w0, $w0, 13
1284 ; MSA64N32-NEXT: jr $ra
1285 ; MSA64N32-NEXT: st.h $w0, 0($1)
1287 %a = load <8 x i16>, ptr %ptr, align 16
1288 %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 13)
1289 store <8 x i16> %r, ptr %ptr, align 16
1293 define void @clei_u_h(ptr %ptr) {
1294 ; MSA-LABEL: clei_u_h:
1295 ; MSA: # %bb.0: # %entry
1296 ; MSA-NEXT: ld.h $w0, 0($4)
1297 ; MSA-NEXT: clei_u.h $w0, $w0, 25
1299 ; MSA-NEXT: st.h $w0, 0($4)
1301 ; MSA64N32-LABEL: clei_u_h:
1302 ; MSA64N32: # %bb.0: # %entry
1303 ; MSA64N32-NEXT: sll $1, $4, 0
1304 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1305 ; MSA64N32-NEXT: clei_u.h $w0, $w0, 25
1306 ; MSA64N32-NEXT: jr $ra
1307 ; MSA64N32-NEXT: st.h $w0, 0($1)
1309 %a = load <8 x i16>, ptr %ptr, align 16
1310 %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 25)
1311 store <8 x i16> %r, ptr %ptr, align 16
1315 define void @clti_s_h(ptr %ptr) {
1316 ; MSA-LABEL: clti_s_h:
1317 ; MSA: # %bb.0: # %entry
1318 ; MSA-NEXT: ld.h $w0, 0($4)
1319 ; MSA-NEXT: clti_s.h $w0, $w0, 15
1321 ; MSA-NEXT: st.h $w0, 0($4)
1323 ; MSA64N32-LABEL: clti_s_h:
1324 ; MSA64N32: # %bb.0: # %entry
1325 ; MSA64N32-NEXT: sll $1, $4, 0
1326 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1327 ; MSA64N32-NEXT: clti_s.h $w0, $w0, 15
1328 ; MSA64N32-NEXT: jr $ra
1329 ; MSA64N32-NEXT: st.h $w0, 0($1)
1331 %a = load <8 x i16>, ptr %ptr, align 16
1332 %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 15)
1333 store <8 x i16> %r, ptr %ptr, align 16
1337 define void @clti_u_h(ptr %ptr) {
1338 ; MSA-LABEL: clti_u_h:
1339 ; MSA: # %bb.0: # %entry
1340 ; MSA-NEXT: ld.h $w0, 0($4)
1341 ; MSA-NEXT: clti_u.h $w0, $w0, 25
1343 ; MSA-NEXT: st.h $w0, 0($4)
1345 ; MSA64N32-LABEL: clti_u_h:
1346 ; MSA64N32: # %bb.0: # %entry
1347 ; MSA64N32-NEXT: sll $1, $4, 0
1348 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1349 ; MSA64N32-NEXT: clti_u.h $w0, $w0, 25
1350 ; MSA64N32-NEXT: jr $ra
1351 ; MSA64N32-NEXT: st.h $w0, 0($1)
1353 %a = load <8 x i16>, ptr %ptr, align 16
1354 %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 25)
1355 store <8 x i16> %r, ptr %ptr, align 16
1359 define void @maxi_s_h(ptr %ptr) {
1360 ; MSA-LABEL: maxi_s_h:
1361 ; MSA: # %bb.0: # %entry
1362 ; MSA-NEXT: ld.h $w0, 0($4)
1363 ; MSA-NEXT: maxi_s.h $w0, $w0, 2
1365 ; MSA-NEXT: st.h $w0, 0($4)
1367 ; MSA64N32-LABEL: maxi_s_h:
1368 ; MSA64N32: # %bb.0: # %entry
1369 ; MSA64N32-NEXT: sll $1, $4, 0
1370 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1371 ; MSA64N32-NEXT: maxi_s.h $w0, $w0, 2
1372 ; MSA64N32-NEXT: jr $ra
1373 ; MSA64N32-NEXT: st.h $w0, 0($1)
1375 %a = load <8 x i16>, ptr %ptr, align 16
1376 %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 2)
1377 store <8 x i16> %r, ptr %ptr, align 16
1381 define void @maxi_u_h(ptr %ptr) {
1382 ; MSA-LABEL: maxi_u_h:
1383 ; MSA: # %bb.0: # %entry
1384 ; MSA-NEXT: ld.h $w0, 0($4)
1385 ; MSA-NEXT: maxi_u.h $w0, $w0, 2
1387 ; MSA-NEXT: st.h $w0, 0($4)
1389 ; MSA64N32-LABEL: maxi_u_h:
1390 ; MSA64N32: # %bb.0: # %entry
1391 ; MSA64N32-NEXT: sll $1, $4, 0
1392 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1393 ; MSA64N32-NEXT: maxi_u.h $w0, $w0, 2
1394 ; MSA64N32-NEXT: jr $ra
1395 ; MSA64N32-NEXT: st.h $w0, 0($1)
1397 %a = load <8 x i16>, ptr %ptr, align 16
1398 %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 2)
1399 store <8 x i16> %r, ptr %ptr, align 16
1403 define void @mini_s_h(ptr %ptr) {
1404 ; MSA-LABEL: mini_s_h:
1405 ; MSA: # %bb.0: # %entry
1406 ; MSA-NEXT: ld.h $w0, 0($4)
1407 ; MSA-NEXT: mini_s.h $w0, $w0, 2
1409 ; MSA-NEXT: st.h $w0, 0($4)
1411 ; MSA64N32-LABEL: mini_s_h:
1412 ; MSA64N32: # %bb.0: # %entry
1413 ; MSA64N32-NEXT: sll $1, $4, 0
1414 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1415 ; MSA64N32-NEXT: mini_s.h $w0, $w0, 2
1416 ; MSA64N32-NEXT: jr $ra
1417 ; MSA64N32-NEXT: st.h $w0, 0($1)
1419 %a = load <8 x i16>, ptr %ptr, align 16
1420 %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 2)
1421 store <8 x i16> %r, ptr %ptr, align 16
1425 define void @mini_u_h(ptr %ptr) {
1426 ; MSA-LABEL: mini_u_h:
1427 ; MSA: # %bb.0: # %entry
1428 ; MSA-NEXT: ld.h $w0, 0($4)
1429 ; MSA-NEXT: mini_u.h $w0, $w0, 2
1431 ; MSA-NEXT: st.h $w0, 0($4)
1433 ; MSA64N32-LABEL: mini_u_h:
1434 ; MSA64N32: # %bb.0: # %entry
1435 ; MSA64N32-NEXT: sll $1, $4, 0
1436 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1437 ; MSA64N32-NEXT: mini_u.h $w0, $w0, 2
1438 ; MSA64N32-NEXT: jr $ra
1439 ; MSA64N32-NEXT: st.h $w0, 0($1)
1441 %a = load <8 x i16>, ptr %ptr, align 16
1442 %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 2)
1443 store <8 x i16> %r, ptr %ptr, align 16
1447 define void @ldi_h(ptr %ptr) {
; MSA-LABEL: ldi_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.h $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.h $w0, 0($4)
1454 ; MSA64N32-LABEL: ldi_h:
1455 ; MSA64N32: # %bb.0: # %entry
1456 ; MSA64N32-NEXT: sll $1, $4, 0
1457 ; MSA64N32-NEXT: ldi.h $w0, 3
1458 ; MSA64N32-NEXT: jr $ra
1459 ; MSA64N32-NEXT: st.h $w0, 0($1)
1461 %r = call <8 x i16> @llvm.mips.ldi.h(i32 3)
1462 store <8 x i16> %r, ptr %ptr, align 16
1466 define void @sldi_h(ptr %ptr) {
1467 ; MSA-LABEL: sldi_h:
1468 ; MSA: # %bb.0: # %entry
1469 ; MSA-NEXT: ld.h $w0, 0($4)
1470 ; MSA-NEXT: sldi.h $w0, $w0[3]
1472 ; MSA-NEXT: st.h $w0, 0($4)
1474 ; MSA64N32-LABEL: sldi_h:
1475 ; MSA64N32: # %bb.0: # %entry
1476 ; MSA64N32-NEXT: sll $1, $4, 0
1477 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1478 ; MSA64N32-NEXT: sldi.h $w0, $w0[3]
1479 ; MSA64N32-NEXT: jr $ra
1480 ; MSA64N32-NEXT: st.h $w0, 0($1)
1482 %a = load <8 x i16>, ptr %ptr, align 16
1483 %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 3)
1484 store <8 x i16> %r, ptr %ptr, align 16
1488 define void @slli_h(ptr %ptr) {
1489 ; MSA-LABEL: slli_h:
1490 ; MSA: # %bb.0: # %entry
1491 ; MSA-NEXT: ld.h $w0, 0($4)
1492 ; MSA-NEXT: slli.h $w0, $w0, 3
1494 ; MSA-NEXT: st.h $w0, 0($4)
1496 ; MSA64N32-LABEL: slli_h:
1497 ; MSA64N32: # %bb.0: # %entry
1498 ; MSA64N32-NEXT: sll $1, $4, 0
1499 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1500 ; MSA64N32-NEXT: slli.h $w0, $w0, 3
1501 ; MSA64N32-NEXT: jr $ra
1502 ; MSA64N32-NEXT: st.h $w0, 0($1)
1504 %a = load <8 x i16>, ptr %ptr, align 16
1505 %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 3)
1506 store <8 x i16> %r, ptr %ptr, align 16
1510 define void @splati_h(ptr %ptr) {
1511 ; MSA-LABEL: splati_h:
1512 ; MSA: # %bb.0: # %entry
1513 ; MSA-NEXT: ld.h $w0, 0($4)
1514 ; MSA-NEXT: splati.h $w0, $w0[3]
1516 ; MSA-NEXT: st.h $w0, 0($4)
1518 ; MSA64N32-LABEL: splati_h:
1519 ; MSA64N32: # %bb.0: # %entry
1520 ; MSA64N32-NEXT: sll $1, $4, 0
1521 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1522 ; MSA64N32-NEXT: splati.h $w0, $w0[3]
1523 ; MSA64N32-NEXT: jr $ra
1524 ; MSA64N32-NEXT: st.h $w0, 0($1)
1526 %a = load <8 x i16>, ptr %ptr, align 16
1527 %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 3)
1528 store <8 x i16> %r, ptr %ptr, align 16
1532 define void @srai_h(ptr %ptr) {
1533 ; MSA-LABEL: srai_h:
1534 ; MSA: # %bb.0: # %entry
1535 ; MSA-NEXT: ld.h $w0, 0($4)
1536 ; MSA-NEXT: srai.h $w0, $w0, 3
1538 ; MSA-NEXT: st.h $w0, 0($4)
1540 ; MSA64N32-LABEL: srai_h:
1541 ; MSA64N32: # %bb.0: # %entry
1542 ; MSA64N32-NEXT: sll $1, $4, 0
1543 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1544 ; MSA64N32-NEXT: srai.h $w0, $w0, 3
1545 ; MSA64N32-NEXT: jr $ra
1546 ; MSA64N32-NEXT: st.h $w0, 0($1)
1548 %a = load <8 x i16>, ptr %ptr, align 16
1549 %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 3)
1550 store <8 x i16> %r, ptr %ptr, align 16
1554 define void @srari_h(ptr %ptr) {
1555 ; MSA-LABEL: srari_h:
1556 ; MSA: # %bb.0: # %entry
1557 ; MSA-NEXT: ld.h $w0, 0($4)
1558 ; MSA-NEXT: srari.h $w0, $w0, 3
1560 ; MSA-NEXT: st.h $w0, 0($4)
1562 ; MSA64N32-LABEL: srari_h:
1563 ; MSA64N32: # %bb.0: # %entry
1564 ; MSA64N32-NEXT: sll $1, $4, 0
1565 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1566 ; MSA64N32-NEXT: srari.h $w0, $w0, 3
1567 ; MSA64N32-NEXT: jr $ra
1568 ; MSA64N32-NEXT: st.h $w0, 0($1)
1570 %a = load <8 x i16>, ptr %ptr, align 16
1571 %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 3)
1572 store <8 x i16> %r, ptr %ptr, align 16
1576 define void @srli_h(ptr %ptr) {
1577 ; MSA-LABEL: srli_h:
1578 ; MSA: # %bb.0: # %entry
1579 ; MSA-NEXT: ld.h $w0, 0($4)
1580 ; MSA-NEXT: srli.h $w0, $w0, 3
1582 ; MSA-NEXT: st.h $w0, 0($4)
1584 ; MSA64N32-LABEL: srli_h:
1585 ; MSA64N32: # %bb.0: # %entry
1586 ; MSA64N32-NEXT: sll $1, $4, 0
1587 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1588 ; MSA64N32-NEXT: srli.h $w0, $w0, 3
1589 ; MSA64N32-NEXT: jr $ra
1590 ; MSA64N32-NEXT: st.h $w0, 0($1)
1592 %a = load <8 x i16>, ptr %ptr, align 16
1593 %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 3)
1594 store <8 x i16> %r, ptr %ptr, align 16
1598 define void @srlri_h(ptr %ptr) {
1599 ; MSA-LABEL: srlri_h:
1600 ; MSA: # %bb.0: # %entry
1601 ; MSA-NEXT: ld.h $w0, 0($4)
1602 ; MSA-NEXT: srlri.h $w0, $w0, 3
1604 ; MSA-NEXT: st.h $w0, 0($4)
1606 ; MSA64N32-LABEL: srlri_h:
1607 ; MSA64N32: # %bb.0: # %entry
1608 ; MSA64N32-NEXT: sll $1, $4, 0
1609 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1610 ; MSA64N32-NEXT: srlri.h $w0, $w0, 3
1611 ; MSA64N32-NEXT: jr $ra
1612 ; MSA64N32-NEXT: st.h $w0, 0($1)
1614 %a = load <8 x i16>, ptr %ptr, align 16
1615 %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 3)
1616 store <8 x i16> %r, ptr %ptr, align 16
1620 define i32 @copy_s_b(ptr %ptr) {
1621 ; MSA-LABEL: copy_s_b:
1622 ; MSA: # %bb.0: # %entry
1623 ; MSA-NEXT: ld.b $w0, 0($4)
1625 ; MSA-NEXT: copy_s.b $2, $w0[1]
1627 ; MSA64N32-LABEL: copy_s_b:
1628 ; MSA64N32: # %bb.0: # %entry
1629 ; MSA64N32-NEXT: sll $1, $4, 0
1630 ; MSA64N32-NEXT: ld.b $w0, 0($1)
1631 ; MSA64N32-NEXT: jr $ra
1632 ; MSA64N32-NEXT: copy_s.b $2, $w0[1]
1634 %a = load <16 x i8>, ptr %ptr, align 16
1635 %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 1)
1638 define i32 @copy_s_h(ptr %ptr) {
1639 ; MSA-LABEL: copy_s_h:
1640 ; MSA: # %bb.0: # %entry
1641 ; MSA-NEXT: ld.h $w0, 0($4)
1643 ; MSA-NEXT: copy_s.h $2, $w0[1]
1645 ; MSA64N32-LABEL: copy_s_h:
1646 ; MSA64N32: # %bb.0: # %entry
1647 ; MSA64N32-NEXT: sll $1, $4, 0
1648 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1649 ; MSA64N32-NEXT: jr $ra
1650 ; MSA64N32-NEXT: copy_s.h $2, $w0[1]
1652 %a = load <8 x i16>, ptr %ptr, align 16
1653 %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 1)
1656 define i32 @copy_s_w(ptr %ptr) {
1657 ; MSA-LABEL: copy_s_w:
1658 ; MSA: # %bb.0: # %entry
1659 ; MSA-NEXT: ld.w $w0, 0($4)
1661 ; MSA-NEXT: copy_s.w $2, $w0[1]
1663 ; MSA64N32-LABEL: copy_s_w:
1664 ; MSA64N32: # %bb.0: # %entry
1665 ; MSA64N32-NEXT: sll $1, $4, 0
1666 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1667 ; MSA64N32-NEXT: jr $ra
1668 ; MSA64N32-NEXT: copy_s.w $2, $w0[1]
1670 %a = load <4 x i32>, ptr %ptr, align 16
1671 %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 1)
1674 define i32 @copy_u_b(ptr %ptr) {
1675 ; MSA-LABEL: copy_u_b:
1676 ; MSA: # %bb.0: # %entry
1677 ; MSA-NEXT: ld.b $w0, 0($4)
1679 ; MSA-NEXT: copy_u.b $2, $w0[1]
1681 ; MSA64N32-LABEL: copy_u_b:
1682 ; MSA64N32: # %bb.0: # %entry
1683 ; MSA64N32-NEXT: sll $1, $4, 0
1684 ; MSA64N32-NEXT: ld.b $w0, 0($1)
1685 ; MSA64N32-NEXT: jr $ra
1686 ; MSA64N32-NEXT: copy_u.b $2, $w0[1]
1688 %a = load <16 x i8>, ptr %ptr, align 16
1689 %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 1)
1692 define i32 @copy_u_h(ptr %ptr) {
1693 ; MSA-LABEL: copy_u_h:
1694 ; MSA: # %bb.0: # %entry
1695 ; MSA-NEXT: ld.h $w0, 0($4)
1697 ; MSA-NEXT: copy_u.h $2, $w0[1]
1699 ; MSA64N32-LABEL: copy_u_h:
1700 ; MSA64N32: # %bb.0: # %entry
1701 ; MSA64N32-NEXT: sll $1, $4, 0
1702 ; MSA64N32-NEXT: ld.h $w0, 0($1)
1703 ; MSA64N32-NEXT: jr $ra
1704 ; MSA64N32-NEXT: copy_u.h $2, $w0[1]
1706 %a = load <8 x i16>, ptr %ptr, align 16
1707 %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 1)
1710 define i32 @copy_u_w(ptr %ptr) {
1711 ; MSA32-LABEL: copy_u_w:
1712 ; MSA32: # %bb.0: # %entry
1713 ; MSA32-NEXT: ld.w $w0, 0($4)
1714 ; MSA32-NEXT: jr $ra
1715 ; MSA32-NEXT: copy_s.w $2, $w0[1]
1717 ; MSA64N32-LABEL: copy_u_w:
1718 ; MSA64N32: # %bb.0: # %entry
1719 ; MSA64N32-NEXT: sll $1, $4, 0
1720 ; MSA64N32-NEXT: ld.w $w0, 0($1)
1721 ; MSA64N32-NEXT: jr $ra
1722 ; MSA64N32-NEXT: copy_u.w $2, $w0[1]
1724 ; MSA64N64-LABEL: copy_u_w:
1725 ; MSA64N64: # %bb.0: # %entry
1726 ; MSA64N64-NEXT: ld.w $w0, 0($4)
1727 ; MSA64N64-NEXT: jr $ra
1728 ; MSA64N64-NEXT: copy_u.w $2, $w0[1]
1730 %a = load <4 x i32>, ptr %ptr, align 16
1731 %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 1)
1735 define i64 @copy_s_d(ptr %ptr) {
1736 ; MSA32-LABEL: copy_s_d:
1737 ; MSA32: # %bb.0: # %entry
1738 ; MSA32-NEXT: ld.w $w0, 0($4)
1739 ; MSA32-NEXT: copy_s.w $2, $w0[2]
1740 ; MSA32-NEXT: jr $ra
1741 ; MSA32-NEXT: copy_s.w $3, $w0[3]
1743 ; MSA64N32-LABEL: copy_s_d:
1744 ; MSA64N32: # %bb.0: # %entry
1745 ; MSA64N32-NEXT: sll $1, $4, 0
1746 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1747 ; MSA64N32-NEXT: jr $ra
1748 ; MSA64N32-NEXT: copy_s.d $2, $w0[1]
1750 ; MSA64N64-LABEL: copy_s_d:
1751 ; MSA64N64: # %bb.0: # %entry
1752 ; MSA64N64-NEXT: ld.d $w0, 0($4)
1753 ; MSA64N64-NEXT: jr $ra
1754 ; MSA64N64-NEXT: copy_s.d $2, $w0[1]
1756 %a = load <2 x i64>, ptr %ptr, align 16
1757 %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 1)
1761 define i64 @copy_u_d(ptr %ptr) {
1762 ; MSA32-LABEL: copy_u_d:
1763 ; MSA32: # %bb.0: # %entry
1764 ; MSA32-NEXT: ld.w $w0, 0($4)
1765 ; MSA32-NEXT: copy_s.w $2, $w0[2]
1766 ; MSA32-NEXT: jr $ra
1767 ; MSA32-NEXT: copy_s.w $3, $w0[3]
1769 ; MSA64N32-LABEL: copy_u_d:
1770 ; MSA64N32: # %bb.0: # %entry
1771 ; MSA64N32-NEXT: sll $1, $4, 0
1772 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1773 ; MSA64N32-NEXT: jr $ra
1774 ; MSA64N32-NEXT: copy_s.d $2, $w0[1]
1776 ; MSA64N64-LABEL: copy_u_d:
1777 ; MSA64N64: # %bb.0: # %entry
1778 ; MSA64N64-NEXT: ld.d $w0, 0($4)
1779 ; MSA64N64-NEXT: jr $ra
1780 ; MSA64N64-NEXT: copy_s.d $2, $w0[1]
1782 %a = load <2 x i64>, ptr %ptr, align 16
1783 %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 1)
1787 define void @addvi_d(ptr %ptr) {
1788 ; MSA-LABEL: addvi_d:
1789 ; MSA: # %bb.0: # %entry
1790 ; MSA-NEXT: ld.d $w0, 0($4)
1791 ; MSA-NEXT: addvi.d $w0, $w0, 25
1793 ; MSA-NEXT: st.d $w0, 0($4)
1795 ; MSA64N32-LABEL: addvi_d:
1796 ; MSA64N32: # %bb.0: # %entry
1797 ; MSA64N32-NEXT: sll $1, $4, 0
1798 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1799 ; MSA64N32-NEXT: addvi.d $w0, $w0, 25
1800 ; MSA64N32-NEXT: jr $ra
1801 ; MSA64N32-NEXT: st.d $w0, 0($1)
1803 %a = load <2 x i64>, ptr %ptr, align 16
1804 %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 25)
1805 store <2 x i64> %r, ptr %ptr, align 16
1809 define void @bclri_d(ptr %ptr) {
1810 ; MSA-LABEL: bclri_d:
1811 ; MSA: # %bb.0: # %entry
1812 ; MSA-NEXT: ld.d $w0, 0($4)
1813 ; MSA-NEXT: bclri.d $w0, $w0, 16
1815 ; MSA-NEXT: st.d $w0, 0($4)
1817 ; MSA64N32-LABEL: bclri_d:
1818 ; MSA64N32: # %bb.0: # %entry
1819 ; MSA64N32-NEXT: sll $1, $4, 0
1820 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1821 ; MSA64N32-NEXT: bclri.d $w0, $w0, 16
1822 ; MSA64N32-NEXT: jr $ra
1823 ; MSA64N32-NEXT: st.d $w0, 0($1)
1825 %a = load <2 x i64>, ptr %ptr, align 16
1826 %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 16)
1827 store <2 x i64> %r, ptr %ptr, align 16
1831 define void @binsli_d(ptr %ptr, ptr %ptr2) {
1832 ; MSA-LABEL: binsli_d:
1833 ; MSA: # %bb.0: # %entry
1834 ; MSA-NEXT: ld.d $w0, 0($5)
1835 ; MSA-NEXT: ld.d $w1, 0($4)
1836 ; MSA-NEXT: binsli.d $w1, $w0, 4
1838 ; MSA-NEXT: st.d $w1, 0($4)
1840 ; MSA64N32-LABEL: binsli_d:
1841 ; MSA64N32: # %bb.0: # %entry
1842 ; MSA64N32-NEXT: sll $1, $4, 0
1843 ; MSA64N32-NEXT: sll $2, $5, 0
1844 ; MSA64N32-NEXT: ld.d $w0, 0($2)
1845 ; MSA64N32-NEXT: ld.d $w1, 0($1)
1846 ; MSA64N32-NEXT: binsli.d $w1, $w0, 4
1847 ; MSA64N32-NEXT: jr $ra
1848 ; MSA64N32-NEXT: st.d $w1, 0($1)
1850 %a = load <2 x i64>, ptr %ptr, align 16
1851 %b = load <2 x i64>, ptr %ptr2, align 16
1852 %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 4)
1853 store <2 x i64> %r, ptr %ptr, align 16
1857 define void @binsri_d(ptr %ptr, ptr %ptr2) {
1858 ; MSA-LABEL: binsri_d:
1859 ; MSA: # %bb.0: # %entry
1860 ; MSA-NEXT: ld.d $w0, 0($5)
1861 ; MSA-NEXT: ld.d $w1, 0($4)
1862 ; MSA-NEXT: binsri.d $w1, $w0, 5
1864 ; MSA-NEXT: st.d $w1, 0($4)
1866 ; MSA64N32-LABEL: binsri_d:
1867 ; MSA64N32: # %bb.0: # %entry
1868 ; MSA64N32-NEXT: sll $1, $4, 0
1869 ; MSA64N32-NEXT: sll $2, $5, 0
1870 ; MSA64N32-NEXT: ld.d $w0, 0($2)
1871 ; MSA64N32-NEXT: ld.d $w1, 0($1)
1872 ; MSA64N32-NEXT: binsri.d $w1, $w0, 5
1873 ; MSA64N32-NEXT: jr $ra
1874 ; MSA64N32-NEXT: st.d $w1, 0($1)
1876 %a = load <2 x i64>, ptr %ptr, align 16
1877 %b = load <2 x i64>, ptr %ptr2, align 16
1878 %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %b, i32 5)
1879 store <2 x i64> %r, ptr %ptr, align 16
1883 define void @bnegi_d(ptr %ptr) {
1884 ; MSA-LABEL: bnegi_d:
1885 ; MSA: # %bb.0: # %entry
1886 ; MSA-NEXT: ld.d $w0, 0($4)
1887 ; MSA-NEXT: bnegi.d $w0, $w0, 9
1889 ; MSA-NEXT: st.d $w0, 0($4)
1891 ; MSA64N32-LABEL: bnegi_d:
1892 ; MSA64N32: # %bb.0: # %entry
1893 ; MSA64N32-NEXT: sll $1, $4, 0
1894 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1895 ; MSA64N32-NEXT: bnegi.d $w0, $w0, 9
1896 ; MSA64N32-NEXT: jr $ra
1897 ; MSA64N32-NEXT: st.d $w0, 0($1)
1899 %a = load <2 x i64>, ptr %ptr, align 16
1900 %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 9)
1901 store <2 x i64> %r, ptr %ptr, align 16
1905 define void @bseti_d(ptr %ptr) {
1906 ; MSA-LABEL: bseti_d:
1907 ; MSA: # %bb.0: # %entry
1908 ; MSA-NEXT: ld.d $w0, 0($4)
1909 ; MSA-NEXT: bseti.d $w0, $w0, 25
1911 ; MSA-NEXT: st.d $w0, 0($4)
1913 ; MSA64N32-LABEL: bseti_d:
1914 ; MSA64N32: # %bb.0: # %entry
1915 ; MSA64N32-NEXT: sll $1, $4, 0
1916 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1917 ; MSA64N32-NEXT: bseti.d $w0, $w0, 25
1918 ; MSA64N32-NEXT: jr $ra
1919 ; MSA64N32-NEXT: st.d $w0, 0($1)
1921 %a = load <2 x i64>, ptr %ptr, align 16
1922 %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 25)
1923 store <2 x i64> %r, ptr %ptr, align 16
1927 define void @clei_s_d(ptr %ptr) {
1928 ; MSA-LABEL: clei_s_d:
1929 ; MSA: # %bb.0: # %entry
1930 ; MSA-NEXT: ld.d $w0, 0($4)
1931 ; MSA-NEXT: clei_s.d $w0, $w0, 15
1933 ; MSA-NEXT: st.d $w0, 0($4)
1935 ; MSA64N32-LABEL: clei_s_d:
1936 ; MSA64N32: # %bb.0: # %entry
1937 ; MSA64N32-NEXT: sll $1, $4, 0
1938 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1939 ; MSA64N32-NEXT: clei_s.d $w0, $w0, 15
1940 ; MSA64N32-NEXT: jr $ra
1941 ; MSA64N32-NEXT: st.d $w0, 0($1)
1943 %a = load <2 x i64>, ptr %ptr, align 16
1944 %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 15)
1945 store <2 x i64> %r, ptr %ptr, align 16
1949 define void @clei_u_d(ptr %ptr) {
1950 ; MSA-LABEL: clei_u_d:
1951 ; MSA: # %bb.0: # %entry
1952 ; MSA-NEXT: ld.d $w0, 0($4)
1953 ; MSA-NEXT: clei_u.d $w0, $w0, 25
1955 ; MSA-NEXT: st.d $w0, 0($4)
1957 ; MSA64N32-LABEL: clei_u_d:
1958 ; MSA64N32: # %bb.0: # %entry
1959 ; MSA64N32-NEXT: sll $1, $4, 0
1960 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1961 ; MSA64N32-NEXT: clei_u.d $w0, $w0, 25
1962 ; MSA64N32-NEXT: jr $ra
1963 ; MSA64N32-NEXT: st.d $w0, 0($1)
1965 %a = load <2 x i64>, ptr %ptr, align 16
1966 %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 25)
1967 store <2 x i64> %r, ptr %ptr, align 16
1971 define void @clti_s_d(ptr %ptr) {
1972 ; MSA-LABEL: clti_s_d:
1973 ; MSA: # %bb.0: # %entry
1974 ; MSA-NEXT: ld.d $w0, 0($4)
1975 ; MSA-NEXT: clti_s.d $w0, $w0, 15
1977 ; MSA-NEXT: st.d $w0, 0($4)
1979 ; MSA64N32-LABEL: clti_s_d:
1980 ; MSA64N32: # %bb.0: # %entry
1981 ; MSA64N32-NEXT: sll $1, $4, 0
1982 ; MSA64N32-NEXT: ld.d $w0, 0($1)
1983 ; MSA64N32-NEXT: clti_s.d $w0, $w0, 15
1984 ; MSA64N32-NEXT: jr $ra
1985 ; MSA64N32-NEXT: st.d $w0, 0($1)
1987 %a = load <2 x i64>, ptr %ptr, align 16
1988 %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 15)
1989 store <2 x i64> %r, ptr %ptr, align 16
1993 define void @clti_u_d(ptr %ptr) {
1994 ; MSA-LABEL: clti_u_d:
1995 ; MSA: # %bb.0: # %entry
1996 ; MSA-NEXT: ld.d $w0, 0($4)
1997 ; MSA-NEXT: clti_u.d $w0, $w0, 25
1999 ; MSA-NEXT: st.d $w0, 0($4)
2001 ; MSA64N32-LABEL: clti_u_d:
2002 ; MSA64N32: # %bb.0: # %entry
2003 ; MSA64N32-NEXT: sll $1, $4, 0
2004 ; MSA64N32-NEXT: ld.d $w0, 0($1)
2005 ; MSA64N32-NEXT: clti_u.d $w0, $w0, 25
2006 ; MSA64N32-NEXT: jr $ra
2007 ; MSA64N32-NEXT: st.d $w0, 0($1)
2009 %a = load <2 x i64>, ptr %ptr, align 16
2010 %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 25)
2011 store <2 x i64> %r, ptr %ptr, align 16
2015 define void @ldi_d(ptr %ptr) {
2016 ; MSA32-LABEL: ldi_d:
2017 ; MSA32: # %bb.0: # %entry
2018 ; MSA32-NEXT: ldi.d $w0, 3
2019 ; MSA32-NEXT: jr $ra
2020 ; MSA32-NEXT: st.w $w0, 0($4)
2022 ; MSA64N32-LABEL: ldi_d:
2023 ; MSA64N32: # %bb.0: # %entry
2024 ; MSA64N32-NEXT: sll $1, $4, 0
2025 ; MSA64N32-NEXT: ldi.d $w0, 3
2026 ; MSA64N32-NEXT: jr $ra
2027 ; MSA64N32-NEXT: st.d $w0, 0($1)
2029 ; MSA64N64-LABEL: ldi_d:
2030 ; MSA64N64: # %bb.0: # %entry
2031 ; MSA64N64-NEXT: ldi.d $w0, 3
2032 ; MSA64N64-NEXT: jr $ra
2033 ; MSA64N64-NEXT: st.d $w0, 0($4)
2035 %r = call <2 x i64> @llvm.mips.ldi.d(i32 3)
2036 store <2 x i64> %r, ptr %ptr, align 16
2040 define void @maxi_s_d(ptr %ptr) {
2041 ; MSA-LABEL: maxi_s_d:
2042 ; MSA: # %bb.0: # %entry
2043 ; MSA-NEXT: ld.d $w0, 0($4)
2044 ; MSA-NEXT: maxi_s.d $w0, $w0, 2
2046 ; MSA-NEXT: st.d $w0, 0($4)
2048 ; MSA64N32-LABEL: maxi_s_d:
2049 ; MSA64N32: # %bb.0: # %entry
2050 ; MSA64N32-NEXT: sll $1, $4, 0
2051 ; MSA64N32-NEXT: ld.d $w0, 0($1)
2052 ; MSA64N32-NEXT: maxi_s.d $w0, $w0, 2
2053 ; MSA64N32-NEXT: jr $ra
2054 ; MSA64N32-NEXT: st.d $w0, 0($1)
2056 %a = load <2 x i64>, ptr %ptr, align 16
2057 %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 2)
2058 store <2 x i64> %r, ptr %ptr, align 16
2062 define void @maxi_u_d(ptr %ptr) {
2063 ; MSA-LABEL: maxi_u_d:
2064 ; MSA: # %bb.0: # %entry
2065 ; MSA-NEXT: ld.d $w0, 0($4)
2066 ; MSA-NEXT: maxi_u.d $w0, $w0, 2
2068 ; MSA-NEXT: st.d $w0, 0($4)
2070 ; MSA64N32-LABEL: maxi_u_d:
2071 ; MSA64N32: # %bb.0: # %entry
2072 ; MSA64N32-NEXT: sll $1, $4, 0
2073 ; MSA64N32-NEXT: ld.d $w0, 0($1)
2074 ; MSA64N32-NEXT: maxi_u.d $w0, $w0, 2
2075 ; MSA64N32-NEXT: jr $ra
2076 ; MSA64N32-NEXT: st.d $w0, 0($1)
2078 %a = load <2 x i64>, ptr %ptr, align 16
2079 %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 2)
2080 store <2 x i64> %r, ptr %ptr, align 16
define void @mini_s_d(ptr %ptr) {
; MSA-LABEL: mini_s_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: mini_s.d $w0, $w0, 2
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: mini_s_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: mini_s.d $w0, $w0, 2
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 2)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @mini_u_d(ptr %ptr) {
; MSA-LABEL: mini_u_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: mini_u.d $w0, $w0, 2
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: mini_u_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: mini_u.d $w0, $w0, 2
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 2)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @sldi_d(ptr %ptr) {
; MSA-LABEL: sldi_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: sldi.d $w0, $w0[1]
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: sldi_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: sldi.d $w0, $w0[1]
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @slli_d(ptr %ptr) {
; MSA-LABEL: slli_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: slli.d $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: slli_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: slli.d $w0, $w0, 3
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 3)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @srai_d(ptr %ptr) {
; MSA-LABEL: srai_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: srai.d $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: srai_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: srai.d $w0, $w0, 3
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 3)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @srari_d(ptr %ptr) {
; MSA-LABEL: srari_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: srari.d $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: srari_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: srari.d $w0, $w0, 3
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 3)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @srli_d(ptr %ptr) {
; MSA-LABEL: srli_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: srli.d $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: srli_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: srli.d $w0, $w0, 3
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 3)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @srlri_d(ptr %ptr) {
; MSA-LABEL: srlri_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA-NEXT: srlri.d $w0, $w0, 3
; MSA-NEXT: jr $ra
; MSA-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: srlri_d:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: ld.d $w0, 0($1)
; MSA64N32-NEXT: srlri.d $w0, $w0, 3
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
%a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 3)
store <2 x i64> %r, ptr %ptr, align 16
ret void
}

define void @ld_d2(ptr %ptr, ptr %ldptr) {
; MSA32-LABEL: ld_d2:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: addiu $1, $5, 4096
; MSA32-NEXT: ld.d $w0, 0($1)
; MSA32-NEXT: jr $ra
; MSA32-NEXT: st.d $w0, 0($4)
;
; MSA64N32-LABEL: ld_d2:
; MSA64N32: # %bb.0: # %entry
; MSA64N32-NEXT: sll $1, $4, 0
; MSA64N32-NEXT: sll $2, $5, 0
; MSA64N32-NEXT: addiu $2, $2, 4096
; MSA64N32-NEXT: ld.d $w0, 0($2)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
;
; MSA64N64-LABEL: ld_d2:
; MSA64N64: # %bb.0: # %entry
; MSA64N64-NEXT: daddiu $1, $5, 4096
; MSA64N64-NEXT: ld.d $w0, 0($1)
; MSA64N64-NEXT: jr $ra
; MSA64N64-NEXT: st.d $w0, 0($4)
entry:
%a = call <2 x i64> @llvm.mips.ld.d(ptr %ldptr, i32 4096)
store <2 x i64> %a, ptr %ptr, align 16
ret void
}

declare <8 x i16> @llvm.mips.ldi.h(i32)
declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32)
declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32)
declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32)
declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32)
declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32)
declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32)
declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32)
declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.ldi.w(i32)
declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32)
declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32)
declare <2 x i64> @llvm.mips.ldi.d(i32)
declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32)
declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32)
declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.clti.u.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32)
declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32)
declare <16 x i8> @llvm.mips.ldi.b(i32)
declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32)
declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32)
declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32)
declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32)
declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32)
declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32)
declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32)
declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32)
declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32)
declare <16 x i8> @llvm.mips.ld.b(ptr, i32)
declare <8 x i16> @llvm.mips.ld.h(ptr, i32)
declare <4 x i32> @llvm.mips.ld.w(ptr, i32)
declare <2 x i64> @llvm.mips.ld.d(ptr, i32)
declare void @llvm.mips.st.b(<16 x i8>, ptr, i32)
declare void @llvm.mips.st.h(<8 x i16>, ptr, i32)
declare void @llvm.mips.st.w(<4 x i32>, ptr, i32)
declare void @llvm.mips.st.d(<2 x i64>, ptr, i32)