; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s \
; RUN:   | FileCheck %s --check-prefixes=ALL,MIPS
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s \
; RUN:   | FileCheck %s --check-prefixes=ALL,MIPSEL

; Test the MSA intrinsics that are encoded with the I5 instruction format.
; There are lots of these so this covers those beginning with 's'.

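; Illustrative note (an assumption for the reader, not something these tests
; check): subvi.<fmt> is understood to subtract a 5-bit unsigned immediate from
; every element of its vector operand, so each call below should behave like an
; element-wise IR subtraction, e.g. for the .b variant with a hypothetical
; operand %arg:
;
;   %res = sub <16 x i8> %arg, <i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14>
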
@llvm_mips_subvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_subvi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_subvi_b_test() nounwind {
; ALL-LABEL: llvm_mips_subvi_b_test:
; ALL:       # %bb.0: # %entry
; ALL-NEXT:    lui $1, %hi(llvm_mips_subvi_b_RES)
; ALL-NEXT:    addiu $1, $1, %lo(llvm_mips_subvi_b_RES)
; ALL-NEXT:    lui $2, %hi(llvm_mips_subvi_b_ARG1)
; ALL-NEXT:    addiu $2, $2, %lo(llvm_mips_subvi_b_ARG1)
; ALL-NEXT:    ld.b $w0, 0($2)
; ALL-NEXT:    subvi.b $w0, $w0, 14
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.b $w0, 0($1)
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind

@llvm_mips_subvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_subvi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_subvi_h_test() nounwind {
; ALL-LABEL: llvm_mips_subvi_h_test:
; ALL:       # %bb.0: # %entry
; ALL-NEXT:    lui $1, %hi(llvm_mips_subvi_h_RES)
; ALL-NEXT:    addiu $1, $1, %lo(llvm_mips_subvi_h_RES)
; ALL-NEXT:    lui $2, %hi(llvm_mips_subvi_h_ARG1)
; ALL-NEXT:    addiu $2, $2, %lo(llvm_mips_subvi_h_ARG1)
; ALL-NEXT:    ld.h $w0, 0($2)
; ALL-NEXT:    subvi.h $w0, $w0, 14
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.h $w0, 0($1)
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind

@llvm_mips_subvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_subvi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_subvi_w_test() nounwind {
; ALL-LABEL: llvm_mips_subvi_w_test:
; ALL:       # %bb.0: # %entry
; ALL-NEXT:    lui $1, %hi(llvm_mips_subvi_w_RES)
; ALL-NEXT:    addiu $1, $1, %lo(llvm_mips_subvi_w_RES)
; ALL-NEXT:    lui $2, %hi(llvm_mips_subvi_w_ARG1)
; ALL-NEXT:    addiu $2, $2, %lo(llvm_mips_subvi_w_ARG1)
; ALL-NEXT:    ld.w $w0, 0($2)
; ALL-NEXT:    subvi.w $w0, $w0, 14
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.w $w0, 0($1)
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind

@llvm_mips_subvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_subvi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_subvi_d_test() nounwind {
; ALL-LABEL: llvm_mips_subvi_d_test:
; ALL:       # %bb.0: # %entry
; ALL-NEXT:    lui $1, %hi(llvm_mips_subvi_d_RES)
; ALL-NEXT:    addiu $1, $1, %lo(llvm_mips_subvi_d_RES)
; ALL-NEXT:    lui $2, %hi(llvm_mips_subvi_d_ARG1)
; ALL-NEXT:    addiu $2, $2, %lo(llvm_mips_subvi_d_ARG1)
; ALL-NEXT:    ld.d $w0, 0($2)
; ALL-NEXT:    subvi.d $w0, $w0, 14
; ALL-NEXT:    jr $ra
; ALL-NEXT:    st.d $w0, 0($1)
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32) nounwind