1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s
4 declare void @use(i32 %arg)
5 declare void @vec_use(<4 x i32> %arg)
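
; These tests check that a chain of two add/sub operations with constant
; operands is folded into a single operation. Each fold is also tested with
; an extra use of the intermediate value (extrause), with <4 x i32> vectors,
; and with non-splat vector constants.

; add (add x, c1), c2  -->  add x, (c1+c2)
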
9 define i32 @add_const_add_const(i32 %arg) {
10 ; CHECK-LABEL: add_const_add_const:
12 ; CHECK-NEXT: add w0, w0, #10 // =10
19 define i32 @add_const_add_const_extrause(i32 %arg) {
20 ; CHECK-LABEL: add_const_add_const_extrause:
22 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
23 ; CHECK-NEXT: .cfi_def_cfa_offset 16
24 ; CHECK-NEXT: .cfi_offset w19, -8
25 ; CHECK-NEXT: .cfi_offset w30, -16
26 ; CHECK-NEXT: mov w19, w0
27 ; CHECK-NEXT: add w0, w0, #8 // =8
28 ; CHECK-NEXT: bl use
29 ; CHECK-NEXT: add w0, w19, #10 // =10
30 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
32 %t0 = add i32 %arg, 8
33 call void @use(i32 %t0)
38 define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
39 ; CHECK-LABEL: vec_add_const_add_const:
41 ; CHECK-NEXT: movi v1.4s, #10
42 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
44 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
45 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
49 define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
50 ; CHECK-LABEL: vec_add_const_add_const_extrause:
52 ; CHECK-NEXT: sub sp, sp, #32 // =32
53 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
54 ; CHECK-NEXT: .cfi_def_cfa_offset 32
55 ; CHECK-NEXT: .cfi_offset w30, -16
56 ; CHECK-NEXT: movi v1.4s, #8
57 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
58 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
59 ; CHECK-NEXT: bl vec_use
60 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
61 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
62 ; CHECK-NEXT: movi v0.4s, #10
63 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
64 ; CHECK-NEXT: add sp, sp, #32 // =32
66 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
67 call void @vec_use(<4 x i32> %t0)
68 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
72 define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
73 ; CHECK-LABEL: vec_add_const_add_const_nonsplat:
75 ; CHECK-NEXT: adrp x8, .LCPI4_0
76 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI4_0]
77 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
79 %t0 = add <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
80 %t1 = add <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
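
; sub (add x, c1), c2  -->  add x, (c1-c2)
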
86 define i32 @add_const_sub_const(i32 %arg) {
87 ; CHECK-LABEL: add_const_sub_const:
89 ; CHECK-NEXT: add w0, w0, #6 // =6
96 define i32 @add_const_sub_const_extrause(i32 %arg) {
97 ; CHECK-LABEL: add_const_sub_const_extrause:
99 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
100 ; CHECK-NEXT: .cfi_def_cfa_offset 16
101 ; CHECK-NEXT: .cfi_offset w19, -8
102 ; CHECK-NEXT: .cfi_offset w30, -16
103 ; CHECK-NEXT: mov w19, w0
104 ; CHECK-NEXT: add w0, w0, #8 // =8
105 ; CHECK-NEXT: bl use
106 ; CHECK-NEXT: add w0, w19, #6 // =6
107 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
109 %t0 = add i32 %arg, 8
110 call void @use(i32 %t0)
115 define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
116 ; CHECK-LABEL: vec_add_const_sub_const:
118 ; CHECK-NEXT: movi v1.4s, #6
119 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
121 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
122 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
126 define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
127 ; CHECK-LABEL: vec_add_const_sub_const_extrause:
129 ; CHECK-NEXT: sub sp, sp, #32 // =32
130 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
131 ; CHECK-NEXT: .cfi_def_cfa_offset 32
132 ; CHECK-NEXT: .cfi_offset w30, -16
133 ; CHECK-NEXT: movi v1.4s, #8
134 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
135 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
136 ; CHECK-NEXT: bl vec_use
137 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
138 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
139 ; CHECK-NEXT: movi v0.4s, #6
140 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
141 ; CHECK-NEXT: add sp, sp, #32 // =32
143 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
144 call void @vec_use(<4 x i32> %t0)
145 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
149 define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) {
150 ; CHECK-LABEL: vec_add_const_sub_const_nonsplat:
152 ; CHECK-NEXT: adrp x8, .LCPI9_0
153 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI9_0]
154 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
156 %t0 = add <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
157 %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
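
; sub c2, (add x, c1)  -->  sub (c2-c1), x
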
163 define i32 @add_const_const_sub(i32 %arg) {
164 ; CHECK-LABEL: add_const_const_sub:
166 ; CHECK-NEXT: mov w8, #-6
167 ; CHECK-NEXT: sub w0, w8, w0
169 %t0 = add i32 %arg, 8
174 define i32 @add_const_const_sub_extrause(i32 %arg) {
175 ; CHECK-LABEL: add_const_const_sub_extrause:
177 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
178 ; CHECK-NEXT: .cfi_def_cfa_offset 16
179 ; CHECK-NEXT: .cfi_offset w19, -8
180 ; CHECK-NEXT: .cfi_offset w30, -16
181 ; CHECK-NEXT: mov w19, w0
182 ; CHECK-NEXT: add w0, w0, #8 // =8
183 ; CHECK-NEXT: bl use
184 ; CHECK-NEXT: mov w8, #-6
185 ; CHECK-NEXT: sub w0, w8, w19
186 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
188 %t0 = add i32 %arg, 8
189 call void @use(i32 %t0)
194 define <4 x i32> @vec_add_const_const_sub(<4 x i32> %arg) {
195 ; CHECK-LABEL: vec_add_const_const_sub:
197 ; CHECK-NEXT: mvni v1.4s, #5
198 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
200 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
201 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
205 define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
206 ; CHECK-LABEL: vec_add_const_const_sub_extrause:
208 ; CHECK-NEXT: sub sp, sp, #32 // =32
209 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
210 ; CHECK-NEXT: .cfi_def_cfa_offset 32
211 ; CHECK-NEXT: .cfi_offset w30, -16
212 ; CHECK-NEXT: movi v1.4s, #8
213 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
214 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
215 ; CHECK-NEXT: bl vec_use
216 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
217 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
218 ; CHECK-NEXT: mvni v0.4s, #5
219 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
220 ; CHECK-NEXT: add sp, sp, #32 // =32
222 %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
223 call void @vec_use(<4 x i32> %t0)
224 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
228 define <4 x i32> @vec_add_const_const_sub_nonsplat(<4 x i32> %arg) {
229 ; CHECK-LABEL: vec_add_const_const_sub_nonsplat:
231 ; CHECK-NEXT: adrp x8, .LCPI14_0
232 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
233 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
235 %t0 = add <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
236 %t1 = sub <4 x i32> <i32 2, i32 3, i32 undef, i32 2>, %t0
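
; add (sub x, c1), c2  -->  sub x, (c1-c2)
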
242 define i32 @sub_const_add_const(i32 %arg) {
243 ; CHECK-LABEL: sub_const_add_const:
245 ; CHECK-NEXT: sub w0, w0, #6 // =6
247 %t0 = sub i32 %arg, 8
252 define i32 @sub_const_add_const_extrause(i32 %arg) {
253 ; CHECK-LABEL: sub_const_add_const_extrause:
255 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
256 ; CHECK-NEXT: .cfi_def_cfa_offset 16
257 ; CHECK-NEXT: .cfi_offset w19, -8
258 ; CHECK-NEXT: .cfi_offset w30, -16
259 ; CHECK-NEXT: mov w19, w0
260 ; CHECK-NEXT: sub w0, w0, #8 // =8
261 ; CHECK-NEXT: bl use
262 ; CHECK-NEXT: sub w0, w19, #6 // =6
263 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
265 %t0 = sub i32 %arg, 8
266 call void @use(i32 %t0)
271 define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
272 ; CHECK-LABEL: vec_sub_const_add_const:
274 ; CHECK-NEXT: mvni v1.4s, #5
275 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
277 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
278 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
282 define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
283 ; CHECK-LABEL: vec_sub_const_add_const_extrause:
285 ; CHECK-NEXT: sub sp, sp, #32 // =32
286 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
287 ; CHECK-NEXT: .cfi_def_cfa_offset 32
288 ; CHECK-NEXT: .cfi_offset w30, -16
289 ; CHECK-NEXT: movi v1.4s, #8
290 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
291 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
292 ; CHECK-NEXT: bl vec_use
293 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
294 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
295 ; CHECK-NEXT: mvni v0.4s, #5
296 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
297 ; CHECK-NEXT: add sp, sp, #32 // =32
299 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
300 call void @vec_use(<4 x i32> %t0)
301 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
305 define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
306 ; CHECK-LABEL: vec_sub_const_add_const_nonsplat:
308 ; CHECK-NEXT: adrp x8, .LCPI19_0
309 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI19_0]
310 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
312 %t0 = sub <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
313 %t1 = add <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
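
; sub (sub x, c1), c2  -->  sub x, (c1+c2)
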
319 define i32 @sub_const_sub_const(i32 %arg) {
320 ; CHECK-LABEL: sub_const_sub_const:
322 ; CHECK-NEXT: sub w0, w0, #10 // =10
324 %t0 = sub i32 %arg, 8
329 define i32 @sub_const_sub_const_extrause(i32 %arg) {
330 ; CHECK-LABEL: sub_const_sub_const_extrause:
332 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
333 ; CHECK-NEXT: .cfi_def_cfa_offset 16
334 ; CHECK-NEXT: .cfi_offset w19, -8
335 ; CHECK-NEXT: .cfi_offset w30, -16
336 ; CHECK-NEXT: mov w19, w0
337 ; CHECK-NEXT: sub w0, w0, #8 // =8
338 ; CHECK-NEXT: bl use
339 ; CHECK-NEXT: sub w0, w19, #10 // =10
340 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
342 %t0 = sub i32 %arg, 8
343 call void @use(i32 %t0)
348 define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
349 ; CHECK-LABEL: vec_sub_const_sub_const:
351 ; CHECK-NEXT: movi v1.4s, #10
352 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
354 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
355 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
359 define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
360 ; CHECK-LABEL: vec_sub_const_sub_const_extrause:
362 ; CHECK-NEXT: sub sp, sp, #32 // =32
363 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
364 ; CHECK-NEXT: .cfi_def_cfa_offset 32
365 ; CHECK-NEXT: .cfi_offset w30, -16
366 ; CHECK-NEXT: movi v1.4s, #8
367 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
368 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
369 ; CHECK-NEXT: bl vec_use
370 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
371 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
372 ; CHECK-NEXT: movi v0.4s, #10
373 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
374 ; CHECK-NEXT: add sp, sp, #32 // =32
376 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
377 call void @vec_use(<4 x i32> %t0)
378 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
382 define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) {
383 ; CHECK-LABEL: vec_sub_const_sub_const_nonsplat:
385 ; CHECK-NEXT: adrp x8, .LCPI24_0
386 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI24_0]
387 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
389 %t0 = sub <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
390 %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
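
; sub c2, (sub x, c1)  -->  sub (c1+c2), x
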
396 define i32 @sub_const_const_sub(i32 %arg) {
397 ; CHECK-LABEL: sub_const_const_sub:
399 ; CHECK-NEXT: mov w8, #10
400 ; CHECK-NEXT: sub w0, w8, w0
402 %t0 = sub i32 %arg, 8
407 define i32 @sub_const_const_sub_extrause(i32 %arg) {
408 ; CHECK-LABEL: sub_const_const_sub_extrause:
410 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
411 ; CHECK-NEXT: .cfi_def_cfa_offset 16
412 ; CHECK-NEXT: .cfi_offset w19, -8
413 ; CHECK-NEXT: .cfi_offset w30, -16
414 ; CHECK-NEXT: mov w19, w0
415 ; CHECK-NEXT: sub w0, w0, #8 // =8
416 ; CHECK-NEXT: bl use
417 ; CHECK-NEXT: mov w8, #10
418 ; CHECK-NEXT: sub w0, w8, w19
419 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
421 %t0 = sub i32 %arg, 8
422 call void @use(i32 %t0)
427 define <4 x i32> @vec_sub_const_const_sub(<4 x i32> %arg) {
428 ; CHECK-LABEL: vec_sub_const_const_sub:
430 ; CHECK-NEXT: movi v1.4s, #10
431 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
433 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
434 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
438 define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
439 ; CHECK-LABEL: vec_sub_const_const_sub_extrause:
441 ; CHECK-NEXT: sub sp, sp, #32 // =32
442 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
443 ; CHECK-NEXT: .cfi_def_cfa_offset 32
444 ; CHECK-NEXT: .cfi_offset w30, -16
445 ; CHECK-NEXT: movi v1.4s, #8
446 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
447 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
448 ; CHECK-NEXT: bl vec_use
449 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
450 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
451 ; CHECK-NEXT: movi v0.4s, #2
452 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
453 ; CHECK-NEXT: add sp, sp, #32 // =32
455 %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
456 call void @vec_use(<4 x i32> %t0)
457 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
461 define <4 x i32> @vec_sub_const_const_sub_nonsplat(<4 x i32> %arg) {
462 ; CHECK-LABEL: vec_sub_const_const_sub_nonsplat:
464 ; CHECK-NEXT: adrp x8, .LCPI29_0
465 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI29_0]
466 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
468 %t0 = sub <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
469 %t1 = sub <4 x i32> <i32 2, i32 3, i32 undef, i32 2>, %t0
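
; add (sub c1, x), c2  -->  sub (c1+c2), x
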
475 define i32 @const_sub_add_const(i32 %arg) {
476 ; CHECK-LABEL: const_sub_add_const:
478 ; CHECK-NEXT: mov w8, #10
479 ; CHECK-NEXT: sub w0, w8, w0
481 %t0 = sub i32 8, %arg
486 define i32 @const_sub_add_const_extrause(i32 %arg) {
487 ; CHECK-LABEL: const_sub_add_const_extrause:
489 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
490 ; CHECK-NEXT: .cfi_def_cfa_offset 16
491 ; CHECK-NEXT: .cfi_offset w19, -8
492 ; CHECK-NEXT: .cfi_offset w30, -16
493 ; CHECK-NEXT: mov w8, #8
494 ; CHECK-NEXT: mov w19, w0
495 ; CHECK-NEXT: sub w0, w8, w0
496 ; CHECK-NEXT: bl use
497 ; CHECK-NEXT: mov w8, #10
498 ; CHECK-NEXT: sub w0, w8, w19
499 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
501 %t0 = sub i32 8, %arg
502 call void @use(i32 %t0)
507 define <4 x i32> @vec_const_sub_add_const(<4 x i32> %arg) {
508 ; CHECK-LABEL: vec_const_sub_add_const:
510 ; CHECK-NEXT: movi v1.4s, #10
511 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
513 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
514 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
518 define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
519 ; CHECK-LABEL: vec_const_sub_add_const_extrause:
521 ; CHECK-NEXT: sub sp, sp, #32 // =32
522 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
523 ; CHECK-NEXT: .cfi_def_cfa_offset 32
524 ; CHECK-NEXT: .cfi_offset w30, -16
525 ; CHECK-NEXT: movi v1.4s, #8
526 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
527 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
528 ; CHECK-NEXT: bl vec_use
529 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
530 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
531 ; CHECK-NEXT: movi v0.4s, #10
532 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
533 ; CHECK-NEXT: add sp, sp, #32 // =32
535 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
536 call void @vec_use(<4 x i32> %t0)
537 %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
541 define <4 x i32> @vec_const_sub_add_const_nonsplat(<4 x i32> %arg) {
542 ; CHECK-LABEL: vec_const_sub_add_const_nonsplat:
544 ; CHECK-NEXT: adrp x8, .LCPI34_0
545 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI34_0]
546 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
548 %t0 = sub <4 x i32> <i32 21, i32 undef, i32 8, i32 8>, %arg
549 %t1 = add <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
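
; sub (sub c1, x), c2  -->  sub (c1-c2), x
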
555 define i32 @const_sub_sub_const(i32 %arg) {
556 ; CHECK-LABEL: const_sub_sub_const:
558 ; CHECK-NEXT: mov w8, #6
559 ; CHECK-NEXT: sub w0, w8, w0
561 %t0 = sub i32 8, %arg
566 define i32 @const_sub_sub_const_extrause(i32 %arg) {
567 ; CHECK-LABEL: const_sub_sub_const_extrause:
569 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
570 ; CHECK-NEXT: .cfi_def_cfa_offset 16
571 ; CHECK-NEXT: .cfi_offset w19, -8
572 ; CHECK-NEXT: .cfi_offset w30, -16
573 ; CHECK-NEXT: mov w8, #8
574 ; CHECK-NEXT: mov w19, w0
575 ; CHECK-NEXT: sub w0, w8, w0
576 ; CHECK-NEXT: bl use
577 ; CHECK-NEXT: mov w8, #6
578 ; CHECK-NEXT: sub w0, w8, w19
579 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
581 %t0 = sub i32 8, %arg
582 call void @use(i32 %t0)
587 define <4 x i32> @vec_const_sub_sub_const(<4 x i32> %arg) {
588 ; CHECK-LABEL: vec_const_sub_sub_const:
590 ; CHECK-NEXT: movi v1.4s, #6
591 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
593 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
594 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
598 define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
599 ; CHECK-LABEL: vec_const_sub_sub_const_extrause:
601 ; CHECK-NEXT: sub sp, sp, #32 // =32
602 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
603 ; CHECK-NEXT: .cfi_def_cfa_offset 32
604 ; CHECK-NEXT: .cfi_offset w30, -16
605 ; CHECK-NEXT: movi v1.4s, #8
606 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
607 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
608 ; CHECK-NEXT: bl vec_use
609 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
610 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
611 ; CHECK-NEXT: movi v0.4s, #6
612 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
613 ; CHECK-NEXT: add sp, sp, #32 // =32
615 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
616 call void @vec_use(<4 x i32> %t0)
617 %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
621 define <4 x i32> @vec_const_sub_sub_const_nonsplat(<4 x i32> %arg) {
622 ; CHECK-LABEL: vec_const_sub_sub_const_nonsplat:
624 ; CHECK-NEXT: adrp x8, .LCPI39_0
625 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI39_0]
626 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
628 %t0 = sub <4 x i32> <i32 21, i32 undef, i32 8, i32 8>, %arg
629 %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
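
; sub c2, (sub c1, x)  -->  add x, (c2-c1)
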
635 define i32 @const_sub_const_sub(i32 %arg) {
636 ; CHECK-LABEL: const_sub_const_sub:
638 ; CHECK-NEXT: sub w0, w0, #6 // =6
640 %t0 = sub i32 8, %arg
645 define i32 @const_sub_const_sub_extrause(i32 %arg) {
646 ; CHECK-LABEL: const_sub_const_sub_extrause:
648 ; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
649 ; CHECK-NEXT: .cfi_def_cfa_offset 16
650 ; CHECK-NEXT: .cfi_offset w19, -8
651 ; CHECK-NEXT: .cfi_offset w30, -16
652 ; CHECK-NEXT: mov w8, #8
653 ; CHECK-NEXT: sub w19, w8, w0
654 ; CHECK-NEXT: mov w0, w19
655 ; CHECK-NEXT: bl use
656 ; CHECK-NEXT: mov w8, #2
657 ; CHECK-NEXT: sub w0, w8, w19
658 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
660 %t0 = sub i32 8, %arg
661 call void @use(i32 %t0)
666 define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
667 ; CHECK-LABEL: vec_const_sub_const_sub:
669 ; CHECK-NEXT: mvni v1.4s, #5
670 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
672 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
673 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
677 define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
678 ; CHECK-LABEL: vec_const_sub_const_sub_extrause:
680 ; CHECK-NEXT: sub sp, sp, #32 // =32
681 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
682 ; CHECK-NEXT: .cfi_def_cfa_offset 32
683 ; CHECK-NEXT: .cfi_offset w30, -16
684 ; CHECK-NEXT: movi v1.4s, #8
685 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
686 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
687 ; CHECK-NEXT: bl vec_use
688 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
689 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
690 ; CHECK-NEXT: movi v0.4s, #2
691 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
692 ; CHECK-NEXT: add sp, sp, #32 // =32
694 %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
695 call void @vec_use(<4 x i32> %t0)
696 %t1 = sub <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %t0
700 define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) {
701 ; CHECK-LABEL: vec_const_sub_const_sub_nonsplat:
703 ; CHECK-NEXT: adrp x8, .LCPI44_0
704 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI44_0]
705 ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
707 %t0 = sub <4 x i32> <i32 21, i32 undef, i32 8, i32 8>, %arg
708 %t1 = sub <4 x i32> <i32 2, i32 3, i32 undef, i32 2>, %t0