; RUN: llc < %s -debug-only=legalize-types 2>&1 | FileCheck %s --check-prefix=CHECK-LEGALIZATION
; RUN: llc < %s | FileCheck %s
; REQUIRES: asserts

target triple = "aarch64-unknown-linux-gnu"
attributes #0 = {"target-features"="+sve"}

declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)
declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
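
; Inserting a fixed-length vector that is wider than a single SVE register
; into a scalable vector is expected to be type-legalized by splitting the
; fixed vector into register-sized pieces and chaining insert_subvector
; nodes (at indices 0, 2, 4 and 6), which the CHECK-LEGALIZATION lines
; below verify.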
define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2i64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2i64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2i64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>
; CHECK-LABEL: test_nxv2i64_v8i64:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [sp]
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w8, #6
; CHECK-NEXT: cmp x9, #6
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #3, mul vl]
; CHECK-NEXT: str q4, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
  %r = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
  ret <vscale x 2 x i64> %r
}
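
; The same split is expected for floating-point element types.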
define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x double> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2f64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2f64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2f64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2f64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>
; CHECK-LABEL: test_nxv2f64_v8f64:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [sp]
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w8, #6
; CHECK-NEXT: cmp x9, #6
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #3, mul vl]
; CHECK-NEXT: str q4, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
  %r = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> %a, <8 x double> %b, i64 0)
  ret <vscale x 2 x double> %r
}