# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
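# These tests exercise GlobalISel legalization of the MSA vector-add intrinsics:
# the register-register forms (llvm.mips.addv.*) should legalize to a plain
# G_ADD, while the register-immediate forms (llvm.mips.addvi.*) are emitted as
# the corresponding ADDVI_* MSA instructions. As a rough sketch (not part of
# this test, and assuming the usual mapping of Clang's <msa.h> builtins onto
# these intrinsics), such calls would originate from C source like:
#
#   v16i8 sum     = __builtin_msa_addv_b(a, b);   // -> @llvm.mips.addv.b
#   v16i8 sum_imm = __builtin_msa_addvi_b(a, 3);  // -> @llvm.mips.addvi.b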
--- |

  declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
  define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }

  declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
  define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }

  declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
  define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }

  declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
  define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }

  declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
  define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) { entry: ret void }

  declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
  define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) { entry: ret void }

  declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
  define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void }

  declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
  define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) { entry: ret void }

...
---
name:            add_v16i8_builtin
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: add_v16i8_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
    ; P5600: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[ADD]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
    %5:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.addv.b), %3(<16 x s8>), %4(<16 x s8>)
    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v8i16_builtin
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: add_v8i16_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
    ; P5600: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[ADD]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
    %5:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.addv.h), %3(<8 x s16>), %4(<8 x s16>)
    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v4i32_builtin
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: add_v4i32_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
    ; P5600: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[ADD]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.addv.w), %3(<4 x s32>), %4(<4 x s32>)
    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v2i64_builtin
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: add_v2i64_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
    ; P5600: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[ADD]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.addv.d), %3(<2 x s64>), %4(<2 x s64>)
    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v16i8_builtin_imm
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1

    ; P5600-LABEL: name: add_v16i8_builtin_imm
    ; P5600: liveins: $a0, $a1
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[LOAD:%[0-9]+]]:msa128b(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[ADDVI_B:%[0-9]+]]:msa128b(<16 x s8>) = ADDVI_B [[LOAD]](<16 x s8>), 3
    ; P5600: G_STORE [[ADDVI_B]](<16 x s8>), [[COPY1]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %3:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.addvi.b), %2(<16 x s8>), 3
    G_STORE %3(<16 x s8>), %1(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v8i16_builtin_imm
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1

    ; P5600-LABEL: name: add_v8i16_builtin_imm
    ; P5600: liveins: $a0, $a1
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[LOAD:%[0-9]+]]:msa128h(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[ADDVI_H:%[0-9]+]]:msa128h(<8 x s16>) = ADDVI_H [[LOAD]](<8 x s16>), 18
    ; P5600: G_STORE [[ADDVI_H]](<8 x s16>), [[COPY1]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %3:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.addvi.h), %2(<8 x s16>), 18
    G_STORE %3(<8 x s16>), %1(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v4i32_builtin_imm
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1

    ; P5600-LABEL: name: add_v4i32_builtin_imm
    ; P5600: liveins: $a0, $a1
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[LOAD:%[0-9]+]]:msa128w(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[ADDVI_W:%[0-9]+]]:msa128w(<4 x s32>) = ADDVI_W [[LOAD]](<4 x s32>), 25
    ; P5600: G_STORE [[ADDVI_W]](<4 x s32>), [[COPY1]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %3:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.addvi.w), %2(<4 x s32>), 25
    G_STORE %3(<4 x s32>), %1(p0) :: (store 16 into %ir.c)
    RetRA

...
---
name:            add_v2i64_builtin_imm
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1

    ; P5600-LABEL: name: add_v2i64_builtin_imm
    ; P5600: liveins: $a0, $a1
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[LOAD:%[0-9]+]]:msa128d(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
    ; P5600: [[ADDVI_D:%[0-9]+]]:msa128d(<2 x s64>) = ADDVI_D [[LOAD]](<2 x s64>), 31
    ; P5600: G_STORE [[ADDVI_D]](<2 x s64>), [[COPY1]](p0) :: (store 16 into %ir.c)
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
    %3:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.addvi.d), %2(<2 x s64>), 31
    G_STORE %3(<2 x s64>), %1(p0) :: (store 16 into %ir.c)
    RetRA

...