; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s

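; Saturating vector add/subtract intrinsics. For 8/16/32-bit elements these
; select the MVE VQADD/VQSUB instructions directly; MVE has no 64-bit
; saturating add/subtract, so the v2i64 cases below are expanded to scalar code.
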
define arm_aapcs_vfpcc <16 x i8> @sadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: sadd_int8_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.s8 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
  ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @sadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: sadd_int16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.s16 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
  ret <8 x i16> %0
}

define arm_aapcs_vfpcc <4 x i32> @sadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: sadd_int32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.s32 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
  ret <4 x i32> %0
}

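; v2i64: no MVE saturating add, so each lane is expanded to an adds/adc pair
; with signed-overflow detection selecting the INT64_MIN/INT64_MAX clamp value.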
define arm_aapcs_vfpcc <2 x i64> @sadd_int64_t(<2 x i64> %src1, <2 x i64> %src2) {
; CHECK-LABEL: sadd_int64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vmov r0, r1, d3
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    adds r2, r2, r0
; CHECK-NEXT:    eor.w r12, r3, r1
; CHECK-NEXT:    adc.w r0, r3, r1
; CHECK-NEXT:    eor.w r1, r3, r0
; CHECK-NEXT:    vmov r3, r4, d0
; CHECK-NEXT:    bic.w r1, r1, r12
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    vmov lr, r1, d2
; CHECK-NEXT:    cset r12, mi
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    asrne r2, r0, #31
; CHECK-NEXT:    adds.w r3, r3, lr
; CHECK-NEXT:    eor.w r5, r4, r1
; CHECK-NEXT:    adcs r1, r4
; CHECK-NEXT:    eors r4, r1
; CHECK-NEXT:    bic.w r5, r4, r5
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    cset r5, mi
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    asrne r3, r1, #31
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    mov.w r2, #-2147483648
; CHECK-NEXT:    it ne
; CHECK-NEXT:    eorne.w r0, r2, r0, asr #31
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    eorne.w r1, r2, r1, asr #31
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %0 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
  ret <2 x i64> %0
}

define arm_aapcs_vfpcc <16 x i8> @uadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: uadd_int8_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.u8 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
  ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @uadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: uadd_int16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.u16 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
  ret <8 x i16> %0
}

define arm_aapcs_vfpcc <4 x i32> @uadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: uadd_int32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqadd.u32 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
  ret <4 x i32> %0
}

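; v2i64: expanded per lane to adds/adcs, saturating the result to all-ones
; (UINT64_MAX) when the addition carries out.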
define arm_aapcs_vfpcc <2 x i64> @uadd_int64_t(<2 x i64> %src1, <2 x i64> %src2) {
; CHECK-LABEL: uadd_int64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vmov r0, r1, d3
; CHECK-NEXT:    mov.w r12, #0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    vmov r4, r5, d0
; CHECK-NEXT:    adds r0, r0, r2
; CHECK-NEXT:    adcs r1, r3
; CHECK-NEXT:    vmov r3, r2, d2
; CHECK-NEXT:    adcs lr, r12, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    adds r3, r3, r4
; CHECK-NEXT:    adcs r2, r5
; CHECK-NEXT:    adcs r5, r12, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r3, #-1
; CHECK-NEXT:    cmp.w lr, #0
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r1, #-1
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r2, #-1
; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %0 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
  ret <2 x i64> %0
}

define arm_aapcs_vfpcc <16 x i8> @ssub_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: ssub_int8_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.s8 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
  ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @ssub_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: ssub_int16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.s16 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
  ret <8 x i16> %0
}

define arm_aapcs_vfpcc <4 x i32> @ssub_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: ssub_int32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.s32 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
  ret <4 x i32> %0
}

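; v2i64: expanded per lane to subs/sbc with signed-overflow detection, mirroring
; the sadd_int64_t expansion above.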
define arm_aapcs_vfpcc <2 x i64> @ssub_int64_t(<2 x i64> %src1, <2 x i64> %src2) {
; CHECK-LABEL: ssub_int64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vmov r0, r1, d3
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    subs r2, r2, r0
; CHECK-NEXT:    eor.w r12, r3, r1
; CHECK-NEXT:    sbc.w r0, r3, r1
; CHECK-NEXT:    eor.w r1, r3, r0
; CHECK-NEXT:    vmov r3, r4, d0
; CHECK-NEXT:    ands.w r1, r1, r12
; CHECK-NEXT:    vmov lr, r1, d2
; CHECK-NEXT:    cset r12, mi
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    asrne r2, r0, #31
; CHECK-NEXT:    subs.w r3, r3, lr
; CHECK-NEXT:    eor.w r5, r4, r1
; CHECK-NEXT:    sbc.w r1, r4, r1
; CHECK-NEXT:    eors r4, r1
; CHECK-NEXT:    ands r5, r4
; CHECK-NEXT:    cset r5, mi
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    asrne r3, r1, #31
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    mov.w r2, #-2147483648
; CHECK-NEXT:    it ne
; CHECK-NEXT:    eorne.w r0, r2, r0, asr #31
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    eorne.w r1, r2, r1, asr #31
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %0 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
  ret <2 x i64> %0
}

define arm_aapcs_vfpcc <16 x i8> @usub_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: usub_int8_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.u8 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
  ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @usub_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: usub_int16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.u16 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
  ret <8 x i16> %0
}

define arm_aapcs_vfpcc <4 x i32> @usub_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: usub_int32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqsub.u32 q0, q0, q1
; CHECK-NEXT:    bx lr
entry:
  %0 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
  ret <4 x i32> %0
}

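; v2i64: expanded per lane to subs/sbcs, clamping the result to zero when the
; subtraction borrows.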
define arm_aapcs_vfpcc <2 x i64> @usub_int64_t(<2 x i64> %src1, <2 x i64> %src2) {
; CHECK-LABEL: usub_int64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vmov r0, r1, d3
; CHECK-NEXT:    mov.w r12, #0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    vmov r4, r5, d0
; CHECK-NEXT:    subs r0, r2, r0
; CHECK-NEXT:    sbcs.w r1, r3, r1
; CHECK-NEXT:    adc r2, r12, #0
; CHECK-NEXT:    rsbs.w lr, r2, #1
; CHECK-NEXT:    vmov r3, r2, d2
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r0, #0
; CHECK-NEXT:    subs r3, r4, r3
; CHECK-NEXT:    sbcs.w r2, r5, r2
; CHECK-NEXT:    adc r5, r12, #0
; CHECK-NEXT:    rsbs.w r5, r5, #1
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r3, #0
; CHECK-NEXT:    cmp.w lr, #0
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r1, #0
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r2, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %0 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
  ret <2 x i64> %0
}

declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %src1, <2 x i64> %src2)