; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Ensure we don't crash by trying to fold fixed length frame indexes into
; loads/stores that don't support an appropriate addressing mode, hence creating
; too many extra vregs during frame lowering, when we don't have an emergency
; spill slot.
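;
; With vscale_range(2,2) the SVE vector length is fixed at 256 bits, and in the
; CHECK lines below each ld1d/st1d takes its base address from a separate
; "add xN, sp, #imm": the frame index is materialized into a GPR rather than
; folded into the load/store addressing mode, which is the situation the note
; above describes.
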
define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6, ptr %v7, ptr %v8,
; CHECK-LABEL: func1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-48]! // 8-byte Folded Spill
; CHECK-NEXT:    stp x22, x21, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset w19, -8
; CHECK-NEXT:    .cfi_offset w20, -16
; CHECK-NEXT:    .cfi_offset w21, -24
; CHECK-NEXT:    .cfi_offset w22, -32
; CHECK-NEXT:    .cfi_offset w29, -48
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    add x10, sp, #176
; CHECK-NEXT:    add x8, sp, #48
; CHECK-NEXT:    add x9, sp, #144
; CHECK-NEXT:    add x20, sp, #176
; CHECK-NEXT:    ldr x15, [sp, #104]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x10]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
; CHECK-NEXT:    add x8, sp, #112
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x9]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x8]
; CHECK-NEXT:    ldur q4, [sp, #88]
; CHECK-NEXT:    ldp x9, x8, [sp, #328]
; CHECK-NEXT:    ldr x19, [sp, #272]
; CHECK-NEXT:    ldp x11, x10, [sp, #312]
; CHECK-NEXT:    ldp x13, x12, [sp, #296]
; CHECK-NEXT:    ldp x18, x14, [sp, #280]
; CHECK-NEXT:    ldp x16, x17, [sp, #208]
; CHECK-NEXT:    ldp x21, x22, [sp, #352]
; CHECK-NEXT:    st1d { z3.d }, p0, [x20]
; CHECK-NEXT:    add x20, sp, #144
; CHECK-NEXT:    st1d { z2.d }, p0, [x20]
; CHECK-NEXT:    add x20, sp, #112
; CHECK-NEXT:    st1d { z1.d }, p0, [x20]
; CHECK-NEXT:    add x20, sp, #48
; CHECK-NEXT:    st1d { z0.d }, p0, [x20]
; CHECK-NEXT:    stp x21, x22, [sp, #352]
; CHECK-NEXT:    ldp x22, x21, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT:    stp x19, x18, [sp, #272]
; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT:    stp x16, x17, [sp, #208]
; CHECK-NEXT:    stur q4, [sp, #88]
; CHECK-NEXT:    str x15, [sp, #104]
; CHECK-NEXT:    stp x14, x13, [sp, #288]
; CHECK-NEXT:    stp x12, x11, [sp, #304]
; CHECK-NEXT:    stp x10, x9, [sp, #320]
; CHECK-NEXT:    str x8, [sp, #336]
; CHECK-NEXT:    ldr x29, [sp], #48 // 8-byte Folded Reload
; CHECK-NEXT:    b func2
                             ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr %v13, ptr %v14, ptr %v15, ptr %v16,
                             ptr %v17, ptr %v18, ptr %v19, ptr %v20, ptr %v21, ptr %v22, ptr %v23, ptr %v24,
                             ptr %v25, ptr %v26, ptr %v27, ptr %v28, ptr %v29, ptr %v30, ptr %v31, ptr %v32,
                             ptr %v33, ptr %v34, ptr %v35, ptr %v36, ptr %v37, ptr %v38, ptr %v39, ptr %v40,
                             ptr %v41, ptr %v42, ptr %v43, ptr %v44, ptr %v45, ptr %v46, ptr %v47, ptr %v48,
                             ptr %v49) #0 {
  tail call void @func2(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6, ptr %v7, ptr %v8,
                        ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr undef, ptr %v14, ptr %v15, ptr %v16,
                        ptr %v17, ptr %v18, ptr %v19, ptr %v20, ptr %v21, ptr %v22, ptr %v23, ptr %v24,
                        ptr %v25, ptr %v26, ptr %v27, ptr %v28, ptr %v29, ptr %v30, ptr undef, ptr undef,
                        ptr undef, ptr undef, ptr undef, ptr undef, ptr %v37, ptr %v38, ptr %v39, ptr %v40,
                        ptr %v41, ptr %v42, ptr %v43, ptr %v44, ptr %v45, ptr undef, ptr %v47, ptr %v48,
                        ptr undef)
  ret void
}

declare dso_local void @func2(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
                              ptr)

attributes #0 = { "target-features"="+sve" vscale_range(2,2) }