1 ; Test the MSA intrinsics that are encoded with the VEC instruction format.
3 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 -relocation-model=pic < %s \
4 ; RUN: | FileCheck -check-prefix=ANYENDIAN %s
5 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 -relocation-model=pic < %s \
6 ; RUN: | FileCheck -check-prefix=ANYENDIAN %s
; Operands and result storage for llvm_mips_and_v_b_test (and.v, <16 x i8>).
8 @llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
9 @llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
10 @llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
12 define void @llvm_mips_and_v_b_test() nounwind {
14 %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
15 %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
16 %2 = bitcast <16 x i8> %0 to <16 x i8>
17 %3 = bitcast <16 x i8> %1 to <16 x i8>
18 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
19 %5 = bitcast <16 x i8> %4 to <16 x i8>
20 store <16 x i8> %5, ptr @llvm_mips_and_v_b_RES
24 ; ANYENDIAN: llvm_mips_and_v_b_test:
29 ; ANYENDIAN: .size llvm_mips_and_v_b_test
; Operands and result storage for llvm_mips_and_v_h_test (and.v, <8 x i16>).
31 @llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
32 @llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
33 @llvm_mips_and_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
35 define void @llvm_mips_and_v_h_test() nounwind {
37 %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
38 %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
39 %2 = bitcast <8 x i16> %0 to <16 x i8>
40 %3 = bitcast <8 x i16> %1 to <16 x i8>
41 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
42 %5 = bitcast <16 x i8> %4 to <8 x i16>
43 store <8 x i16> %5, ptr @llvm_mips_and_v_h_RES
47 ; ANYENDIAN: llvm_mips_and_v_h_test:
52 ; ANYENDIAN: .size llvm_mips_and_v_h_test
; Operands and result storage for llvm_mips_and_v_w_test (and.v, <4 x i32>).
54 @llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
55 @llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
56 @llvm_mips_and_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
58 define void @llvm_mips_and_v_w_test() nounwind {
60 %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
61 %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
62 %2 = bitcast <4 x i32> %0 to <16 x i8>
63 %3 = bitcast <4 x i32> %1 to <16 x i8>
64 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
65 %5 = bitcast <16 x i8> %4 to <4 x i32>
66 store <4 x i32> %5, ptr @llvm_mips_and_v_w_RES
70 ; ANYENDIAN: llvm_mips_and_v_w_test:
75 ; ANYENDIAN: .size llvm_mips_and_v_w_test
; Operands and result storage for llvm_mips_and_v_d_test (and.v, <2 x i64>).
77 @llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
78 @llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
79 @llvm_mips_and_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
81 define void @llvm_mips_and_v_d_test() nounwind {
83 %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
84 %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
85 %2 = bitcast <2 x i64> %0 to <16 x i8>
86 %3 = bitcast <2 x i64> %1 to <16 x i8>
87 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
88 %5 = bitcast <16 x i8> %4 to <2 x i64>
89 store <2 x i64> %5, ptr @llvm_mips_and_v_d_RES
93 ; ANYENDIAN: llvm_mips_and_v_d_test:
98 ; ANYENDIAN: .size llvm_mips_and_v_d_test
100 define void @and_v_b_test() nounwind {
102 %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
103 %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
104 %2 = and <16 x i8> %0, %1
105 store <16 x i8> %2, ptr @llvm_mips_and_v_b_RES
109 ; ANYENDIAN: and_v_b_test:
114 ; ANYENDIAN: .size and_v_b_test
116 define void @and_v_h_test() nounwind {
118 %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
119 %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
120 %2 = and <8 x i16> %0, %1
121 store <8 x i16> %2, ptr @llvm_mips_and_v_h_RES
125 ; ANYENDIAN: and_v_h_test:
130 ; ANYENDIAN: .size and_v_h_test
133 define void @and_v_w_test() nounwind {
135 %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
136 %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
137 %2 = and <4 x i32> %0, %1
138 store <4 x i32> %2, ptr @llvm_mips_and_v_w_RES
142 ; ANYENDIAN: and_v_w_test:
147 ; ANYENDIAN: .size and_v_w_test
150 define void @and_v_d_test() nounwind {
152 %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
153 %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
154 %2 = and <2 x i64> %0, %1
155 store <2 x i64> %2, ptr @llvm_mips_and_v_d_RES
159 ; ANYENDIAN: and_v_d_test:
164 ; ANYENDIAN: .size and_v_d_test
; Three operands and result storage for llvm_mips_bmnz_v_b_test (bmnz.v, <16 x i8>).
166 @llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
167 @llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
168 @llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
169 @llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
171 define void @llvm_mips_bmnz_v_b_test() nounwind {
173 %0 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG1
174 %1 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG2
175 %2 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG3
176 %3 = bitcast <16 x i8> %0 to <16 x i8>
177 %4 = bitcast <16 x i8> %1 to <16 x i8>
178 %5 = bitcast <16 x i8> %2 to <16 x i8>
179 %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
180 %7 = bitcast <16 x i8> %6 to <16 x i8>
181 store <16 x i8> %7, ptr @llvm_mips_bmnz_v_b_RES
185 ; ANYENDIAN: llvm_mips_bmnz_v_b_test:
186 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
187 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
188 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
189 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
190 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
191 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
192 ; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
193 ; ANYENDIAN-DAG: st.b [[R4]], 0(
194 ; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
; Three operands and result storage for llvm_mips_bmnz_v_h_test (bmnz.v, <8 x i16>).
196 @llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
197 @llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
198 @llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
199 @llvm_mips_bmnz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
201 define void @llvm_mips_bmnz_v_h_test() nounwind {
203 %0 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG1
204 %1 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG2
205 %2 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG3
206 %3 = bitcast <8 x i16> %0 to <16 x i8>
207 %4 = bitcast <8 x i16> %1 to <16 x i8>
208 %5 = bitcast <8 x i16> %2 to <16 x i8>
209 %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
210 %7 = bitcast <16 x i8> %6 to <8 x i16>
211 store <8 x i16> %7, ptr @llvm_mips_bmnz_v_h_RES
215 ; ANYENDIAN: llvm_mips_bmnz_v_h_test:
216 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
217 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
218 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
219 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
220 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
221 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
222 ; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
223 ; ANYENDIAN-DAG: st.b [[R4]], 0(
224 ; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
; Three operands and result storage for llvm_mips_bmnz_v_w_test (bmnz.v, <4 x i32>).
226 @llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
227 @llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
228 @llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
229 @llvm_mips_bmnz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
231 define void @llvm_mips_bmnz_v_w_test() nounwind {
233 %0 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG1
234 %1 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG2
235 %2 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG3
236 %3 = bitcast <4 x i32> %0 to <16 x i8>
237 %4 = bitcast <4 x i32> %1 to <16 x i8>
238 %5 = bitcast <4 x i32> %2 to <16 x i8>
239 %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
240 %7 = bitcast <16 x i8> %6 to <4 x i32>
241 store <4 x i32> %7, ptr @llvm_mips_bmnz_v_w_RES
245 ; ANYENDIAN: llvm_mips_bmnz_v_w_test:
246 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
247 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
248 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
249 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
250 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
251 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
252 ; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
253 ; ANYENDIAN-DAG: st.b [[R4]], 0(
254 ; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
; Three operands and result storage for llvm_mips_bmnz_v_d_test (bmnz.v, <2 x i64>).
256 @llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
257 @llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
258 @llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
259 @llvm_mips_bmnz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
261 define void @llvm_mips_bmnz_v_d_test() nounwind {
263 %0 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG1
264 %1 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG2
265 %2 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG3
266 %3 = bitcast <2 x i64> %0 to <16 x i8>
267 %4 = bitcast <2 x i64> %1 to <16 x i8>
268 %5 = bitcast <2 x i64> %2 to <16 x i8>
269 %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
270 %7 = bitcast <16 x i8> %6 to <2 x i64>
271 store <2 x i64> %7, ptr @llvm_mips_bmnz_v_d_RES
275 ; ANYENDIAN: llvm_mips_bmnz_v_d_test:
276 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
277 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
278 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
279 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
280 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
281 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
282 ; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
283 ; ANYENDIAN-DAG: st.b [[R4]], 0(
284 ; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
; Three operands and result storage for llvm_mips_bmz_v_b_test (bmz.v, <16 x i8>).
286 @llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
287 @llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
288 @llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
289 @llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
291 define void @llvm_mips_bmz_v_b_test() nounwind {
293 %0 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG1
294 %1 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG2
295 %2 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG3
296 %3 = bitcast <16 x i8> %0 to <16 x i8>
297 %4 = bitcast <16 x i8> %1 to <16 x i8>
298 %5 = bitcast <16 x i8> %2 to <16 x i8>
299 %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
300 %7 = bitcast <16 x i8> %6 to <16 x i8>
301 store <16 x i8> %7, ptr @llvm_mips_bmz_v_b_RES
305 ; ANYENDIAN: llvm_mips_bmz_v_b_test:
306 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
307 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
308 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
309 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
310 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
311 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
312 ; bmnz.v is the same as bmz.v with ws and wd_in swapped
313 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
314 ; ANYENDIAN-DAG: st.b [[R5]], 0(
315 ; ANYENDIAN: .size llvm_mips_bmz_v_b_test
; Three operands and result storage for llvm_mips_bmz_v_h_test (bmz.v, <8 x i16>).
317 @llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
318 @llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
319 @llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
320 @llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
322 define void @llvm_mips_bmz_v_h_test() nounwind {
324 %0 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG1
325 %1 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG2
326 %2 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG3
327 %3 = bitcast <8 x i16> %0 to <16 x i8>
328 %4 = bitcast <8 x i16> %1 to <16 x i8>
329 %5 = bitcast <8 x i16> %2 to <16 x i8>
330 %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
331 %7 = bitcast <16 x i8> %6 to <8 x i16>
332 store <8 x i16> %7, ptr @llvm_mips_bmz_v_h_RES
336 ; ANYENDIAN: llvm_mips_bmz_v_h_test:
337 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
338 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
339 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
340 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
341 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
342 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
343 ; bmnz.v is the same as bmz.v with ws and wd_in swapped
344 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
345 ; ANYENDIAN-DAG: st.b [[R5]], 0(
346 ; ANYENDIAN: .size llvm_mips_bmz_v_h_test
; Three operands and result storage for llvm_mips_bmz_v_w_test (bmz.v, <4 x i32>).
348 @llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
349 @llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
350 @llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
351 @llvm_mips_bmz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
353 define void @llvm_mips_bmz_v_w_test() nounwind {
355 %0 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG1
356 %1 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG2
357 %2 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG3
358 %3 = bitcast <4 x i32> %0 to <16 x i8>
359 %4 = bitcast <4 x i32> %1 to <16 x i8>
360 %5 = bitcast <4 x i32> %2 to <16 x i8>
361 %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
362 %7 = bitcast <16 x i8> %6 to <4 x i32>
363 store <4 x i32> %7, ptr @llvm_mips_bmz_v_w_RES
367 ; ANYENDIAN: llvm_mips_bmz_v_w_test:
368 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
369 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
370 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
371 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
372 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
373 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
374 ; bmnz.v is the same as bmz.v with ws and wd_in swapped
375 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
376 ; ANYENDIAN-DAG: st.b [[R5]], 0(
377 ; ANYENDIAN: .size llvm_mips_bmz_v_w_test
; Three operands and result storage for llvm_mips_bmz_v_d_test (bmz.v, <2 x i64>).
379 @llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
380 @llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
381 @llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
382 @llvm_mips_bmz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
384 define void @llvm_mips_bmz_v_d_test() nounwind {
386 %0 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG1
387 %1 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG2
388 %2 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG3
389 %3 = bitcast <2 x i64> %0 to <16 x i8>
390 %4 = bitcast <2 x i64> %1 to <16 x i8>
391 %5 = bitcast <2 x i64> %2 to <16 x i8>
392 %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
393 %7 = bitcast <16 x i8> %6 to <2 x i64>
394 store <2 x i64> %7, ptr @llvm_mips_bmz_v_d_RES
398 ; ANYENDIAN: llvm_mips_bmz_v_d_test:
399 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
400 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
401 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
402 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
403 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
404 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
405 ; bmnz.v is the same as bmz.v with ws and wd_in swapped
406 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
407 ; ANYENDIAN-DAG: st.b [[R5]], 0(
408 ; ANYENDIAN: .size llvm_mips_bmz_v_d_test
; Three operands and result storage for llvm_mips_bsel_v_b_test (bsel.v, <16 x i8>).
410 @llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
411 @llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
412 @llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
413 @llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
415 define void @llvm_mips_bsel_v_b_test() nounwind {
417 %0 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG1
418 %1 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG2
419 %2 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG3
420 %3 = bitcast <16 x i8> %0 to <16 x i8>
421 %4 = bitcast <16 x i8> %1 to <16 x i8>
422 %5 = bitcast <16 x i8> %2 to <16 x i8>
423 %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
424 %7 = bitcast <16 x i8> %6 to <16 x i8>
425 store <16 x i8> %7, ptr @llvm_mips_bsel_v_b_RES
429 ; ANYENDIAN: llvm_mips_bsel_v_b_test:
430 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
431 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
432 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
433 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
434 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
435 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
436 ; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
437 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
438 ; ANYENDIAN-DAG: st.b [[R5]], 0(
439 ; ANYENDIAN: .size llvm_mips_bsel_v_b_test
; Three operands and result storage for llvm_mips_bsel_v_h_test (bsel.v, <8 x i16>).
441 @llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
442 @llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
443 @llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
444 @llvm_mips_bsel_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
446 define void @llvm_mips_bsel_v_h_test() nounwind {
448 %0 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG1
449 %1 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG2
450 %2 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG3
451 %3 = bitcast <8 x i16> %0 to <16 x i8>
452 %4 = bitcast <8 x i16> %1 to <16 x i8>
453 %5 = bitcast <8 x i16> %2 to <16 x i8>
454 %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
455 %7 = bitcast <16 x i8> %6 to <8 x i16>
456 store <8 x i16> %7, ptr @llvm_mips_bsel_v_h_RES
460 ; ANYENDIAN: llvm_mips_bsel_v_h_test:
461 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
462 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
463 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
464 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
465 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
466 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
467 ; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
468 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
469 ; ANYENDIAN-DAG: st.b [[R5]], 0(
470 ; ANYENDIAN: .size llvm_mips_bsel_v_h_test
; Three operands and result storage for llvm_mips_bsel_v_w_test (bsel.v, <4 x i32>).
472 @llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
473 @llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
474 @llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
475 @llvm_mips_bsel_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
477 define void @llvm_mips_bsel_v_w_test() nounwind {
479 %0 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG1
480 %1 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG2
481 %2 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG3
482 %3 = bitcast <4 x i32> %0 to <16 x i8>
483 %4 = bitcast <4 x i32> %1 to <16 x i8>
484 %5 = bitcast <4 x i32> %2 to <16 x i8>
485 %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
486 %7 = bitcast <16 x i8> %6 to <4 x i32>
487 store <4 x i32> %7, ptr @llvm_mips_bsel_v_w_RES
491 ; ANYENDIAN: llvm_mips_bsel_v_w_test:
492 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
493 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
494 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
495 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
496 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
497 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
498 ; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
499 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
500 ; ANYENDIAN-DAG: st.b [[R5]], 0(
501 ; ANYENDIAN: .size llvm_mips_bsel_v_w_test
; Three operands and result storage for llvm_mips_bsel_v_d_test (bsel.v, <2 x i64>).
503 @llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
504 @llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
505 @llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
506 @llvm_mips_bsel_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
508 define void @llvm_mips_bsel_v_d_test() nounwind {
510 %0 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG1
511 %1 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG2
512 %2 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG3
513 %3 = bitcast <2 x i64> %0 to <16 x i8>
514 %4 = bitcast <2 x i64> %1 to <16 x i8>
515 %5 = bitcast <2 x i64> %2 to <16 x i8>
516 %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
517 %7 = bitcast <16 x i8> %6 to <2 x i64>
518 store <2 x i64> %7, ptr @llvm_mips_bsel_v_d_RES
522 ; ANYENDIAN: llvm_mips_bsel_v_d_test:
523 ; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
524 ; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
525 ; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
526 ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
527 ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
528 ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
529 ; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
530 ; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
531 ; ANYENDIAN-DAG: st.b [[R5]], 0(
532 ; ANYENDIAN: .size llvm_mips_bsel_v_d_test
; Operands and result storage for llvm_mips_nor_v_b_test (nor.v, <16 x i8>).
534 @llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
535 @llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
536 @llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
538 define void @llvm_mips_nor_v_b_test() nounwind {
540 %0 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG1
541 %1 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG2
542 %2 = bitcast <16 x i8> %0 to <16 x i8>
543 %3 = bitcast <16 x i8> %1 to <16 x i8>
544 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
545 %5 = bitcast <16 x i8> %4 to <16 x i8>
546 store <16 x i8> %5, ptr @llvm_mips_nor_v_b_RES
550 ; ANYENDIAN: llvm_mips_nor_v_b_test:
555 ; ANYENDIAN: .size llvm_mips_nor_v_b_test
; Operands and result storage for llvm_mips_nor_v_h_test (nor.v, <8 x i16>).
557 @llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
558 @llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
559 @llvm_mips_nor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
561 define void @llvm_mips_nor_v_h_test() nounwind {
563 %0 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG1
564 %1 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG2
565 %2 = bitcast <8 x i16> %0 to <16 x i8>
566 %3 = bitcast <8 x i16> %1 to <16 x i8>
567 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
568 %5 = bitcast <16 x i8> %4 to <8 x i16>
569 store <8 x i16> %5, ptr @llvm_mips_nor_v_h_RES
573 ; ANYENDIAN: llvm_mips_nor_v_h_test:
578 ; ANYENDIAN: .size llvm_mips_nor_v_h_test
; Operands and result storage for llvm_mips_nor_v_w_test (nor.v, <4 x i32>).
580 @llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
581 @llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
582 @llvm_mips_nor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
584 define void @llvm_mips_nor_v_w_test() nounwind {
586 %0 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG1
587 %1 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG2
588 %2 = bitcast <4 x i32> %0 to <16 x i8>
589 %3 = bitcast <4 x i32> %1 to <16 x i8>
590 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
591 %5 = bitcast <16 x i8> %4 to <4 x i32>
592 store <4 x i32> %5, ptr @llvm_mips_nor_v_w_RES
596 ; ANYENDIAN: llvm_mips_nor_v_w_test:
601 ; ANYENDIAN: .size llvm_mips_nor_v_w_test
; Operands and result storage for llvm_mips_nor_v_d_test (nor.v, <2 x i64>).
603 @llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
604 @llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
605 @llvm_mips_nor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
607 define void @llvm_mips_nor_v_d_test() nounwind {
609 %0 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG1
610 %1 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG2
611 %2 = bitcast <2 x i64> %0 to <16 x i8>
612 %3 = bitcast <2 x i64> %1 to <16 x i8>
613 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
614 %5 = bitcast <16 x i8> %4 to <2 x i64>
615 store <2 x i64> %5, ptr @llvm_mips_nor_v_d_RES
619 ; ANYENDIAN: llvm_mips_nor_v_d_test:
624 ; ANYENDIAN: .size llvm_mips_nor_v_d_test
; Operands and result storage for llvm_mips_or_v_b_test (or.v, <16 x i8>).
626 @llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
627 @llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
628 @llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
630 define void @llvm_mips_or_v_b_test() nounwind {
632 %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
633 %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
634 %2 = bitcast <16 x i8> %0 to <16 x i8>
635 %3 = bitcast <16 x i8> %1 to <16 x i8>
636 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
637 %5 = bitcast <16 x i8> %4 to <16 x i8>
638 store <16 x i8> %5, ptr @llvm_mips_or_v_b_RES
642 ; ANYENDIAN: llvm_mips_or_v_b_test:
647 ; ANYENDIAN: .size llvm_mips_or_v_b_test
; Operands and result storage for llvm_mips_or_v_h_test (or.v, <8 x i16>).
649 @llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
650 @llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
651 @llvm_mips_or_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
653 define void @llvm_mips_or_v_h_test() nounwind {
655 %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
656 %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
657 %2 = bitcast <8 x i16> %0 to <16 x i8>
658 %3 = bitcast <8 x i16> %1 to <16 x i8>
659 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
660 %5 = bitcast <16 x i8> %4 to <8 x i16>
661 store <8 x i16> %5, ptr @llvm_mips_or_v_h_RES
665 ; ANYENDIAN: llvm_mips_or_v_h_test:
670 ; ANYENDIAN: .size llvm_mips_or_v_h_test
; Operands and result storage for llvm_mips_or_v_w_test (or.v, <4 x i32>).
672 @llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
673 @llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
674 @llvm_mips_or_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
676 define void @llvm_mips_or_v_w_test() nounwind {
678 %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
679 %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
680 %2 = bitcast <4 x i32> %0 to <16 x i8>
681 %3 = bitcast <4 x i32> %1 to <16 x i8>
682 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
683 %5 = bitcast <16 x i8> %4 to <4 x i32>
684 store <4 x i32> %5, ptr @llvm_mips_or_v_w_RES
688 ; ANYENDIAN: llvm_mips_or_v_w_test:
693 ; ANYENDIAN: .size llvm_mips_or_v_w_test
; Operands and result storage for llvm_mips_or_v_d_test (or.v, <2 x i64>).
695 @llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
696 @llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
697 @llvm_mips_or_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
699 define void @llvm_mips_or_v_d_test() nounwind {
701 %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
702 %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
703 %2 = bitcast <2 x i64> %0 to <16 x i8>
704 %3 = bitcast <2 x i64> %1 to <16 x i8>
705 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
706 %5 = bitcast <16 x i8> %4 to <2 x i64>
707 store <2 x i64> %5, ptr @llvm_mips_or_v_d_RES
711 ; ANYENDIAN: llvm_mips_or_v_d_test:
716 ; ANYENDIAN: .size llvm_mips_or_v_d_test
718 define void @or_v_b_test() nounwind {
720 %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
721 %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
722 %2 = or <16 x i8> %0, %1
723 store <16 x i8> %2, ptr @llvm_mips_or_v_b_RES
727 ; ANYENDIAN: or_v_b_test:
732 ; ANYENDIAN: .size or_v_b_test
734 define void @or_v_h_test() nounwind {
736 %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
737 %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
738 %2 = or <8 x i16> %0, %1
739 store <8 x i16> %2, ptr @llvm_mips_or_v_h_RES
743 ; ANYENDIAN: or_v_h_test:
748 ; ANYENDIAN: .size or_v_h_test
751 define void @or_v_w_test() nounwind {
753 %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
754 %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
755 %2 = or <4 x i32> %0, %1
756 store <4 x i32> %2, ptr @llvm_mips_or_v_w_RES
760 ; ANYENDIAN: or_v_w_test:
765 ; ANYENDIAN: .size or_v_w_test
768 define void @or_v_d_test() nounwind {
770 %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
771 %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
772 %2 = or <2 x i64> %0, %1
773 store <2 x i64> %2, ptr @llvm_mips_or_v_d_RES
777 ; ANYENDIAN: or_v_d_test:
782 ; ANYENDIAN: .size or_v_d_test
; Operands and result storage for llvm_mips_xor_v_b_test (xor.v, <16 x i8>).
784 @llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
785 @llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
786 @llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
788 define void @llvm_mips_xor_v_b_test() nounwind {
790 %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
791 %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
792 %2 = bitcast <16 x i8> %0 to <16 x i8>
793 %3 = bitcast <16 x i8> %1 to <16 x i8>
794 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
795 %5 = bitcast <16 x i8> %4 to <16 x i8>
796 store <16 x i8> %5, ptr @llvm_mips_xor_v_b_RES
800 ; ANYENDIAN: llvm_mips_xor_v_b_test:
805 ; ANYENDIAN: .size llvm_mips_xor_v_b_test
@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_xor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; @llvm.mips.xor.v applied to <8 x i16> data: bitcast to the intrinsic's
; <16 x i8> operand type, call, and bitcast the result back.
define void @llvm_mips_xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, ptr @llvm_mips_xor_v_h_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
823 ; ANYENDIAN: llvm_mips_xor_v_h_test:
828 ; ANYENDIAN: .size llvm_mips_xor_v_h_test
@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_xor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; @llvm.mips.xor.v applied to <4 x i32> data via bitcasts to/from the
; intrinsic's <16 x i8> operand type.
define void @llvm_mips_xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, ptr @llvm_mips_xor_v_w_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
846 ; ANYENDIAN: llvm_mips_xor_v_w_test:
851 ; ANYENDIAN: .size llvm_mips_xor_v_w_test
@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_xor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

; @llvm.mips.xor.v applied to <2 x i64> data via bitcasts to/from the
; intrinsic's <16 x i8> operand type.
define void @llvm_mips_xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, ptr @llvm_mips_xor_v_d_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
869 ; ANYENDIAN: llvm_mips_xor_v_d_test:
874 ; ANYENDIAN: .size llvm_mips_xor_v_d_test
; Plain IR `xor` on <16 x i8> should select xor.v (checked by the
; ANYENDIAN FileCheck lines that follow this function).
define void @xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
  %2 = xor <16 x i8> %0, %1
  store <16 x i8> %2, ptr @llvm_mips_xor_v_b_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
885 ; ANYENDIAN: xor_v_b_test:
890 ; ANYENDIAN: .size xor_v_b_test
; Plain IR `xor` on <8 x i16> — xor.v is bitwise, so the element type is
; irrelevant; selection is verified by the following CHECK lines.
define void @xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
  %2 = xor <8 x i16> %0, %1
  store <8 x i16> %2, ptr @llvm_mips_xor_v_h_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
901 ; ANYENDIAN: xor_v_h_test:
906 ; ANYENDIAN: .size xor_v_h_test
; Plain IR `xor` on <4 x i32>; selection of xor.v is verified by the
; following ANYENDIAN CHECK lines.
define void @xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
  %2 = xor <4 x i32> %0, %1
  store <4 x i32> %2, ptr @llvm_mips_xor_v_w_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
918 ; ANYENDIAN: xor_v_w_test:
923 ; ANYENDIAN: .size xor_v_w_test
; Plain IR `xor` on <2 x i64>; selection of xor.v is verified by the
; following ANYENDIAN CHECK lines.
define void @xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
  %2 = xor <2 x i64> %0, %1
  store <2 x i64> %2, ptr @llvm_mips_xor_v_d_RES
  ret void                              ; restored: function lacked terminator and closing brace
}
935 ; ANYENDIAN: xor_v_d_test:
940 ; ANYENDIAN: .size xor_v_d_test
; External declarations of the MSA VEC-format bitwise intrinsics
; exercised by this test. All operate on 128-bit vectors typed as
; <16 x i8>; callers bitcast other element types to/from this shape.
declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind