; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s --check-prefixes=STRIDED
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS
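
; The STRIDED prefixes check streaming mode with +sme2, where the multi-vector
; LDNT1 instructions may write a strided register tuple such as { z0.b, z8.b }.
; The CONTIGUOUS prefixes check plain +sve2p1, which only provides consecutive
; tuples such as { z0.b, z1.b }, so extra register copies and spills around the
; inline asm are expected.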

define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x2_i8_z0_z8:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1b { z0.b, z8.b }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i8_z0_z8:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1b { z0.b, z1.b }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 0
  %v0 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 1
  %v1 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %v0, <vscale x 16 x i8> %res.v1, i64 16)
  ret <vscale x 32 x i8> %v1
}

define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x2_i8_z0_z8_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1b { z0.b, z8.b }, pn8/z, [x0, x1]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i8_z0_z8_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1b { z0.b, z1.b }, pn8/z, [x0, x1]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i8, ptr %ptr, i64 %index
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 0
  %v0 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 1
  %v1 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %v0, <vscale x 16 x i8> %res.v1, i64 16)
  ret <vscale x 32 x i8> %v1
}

define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x2_i16_z0_z8:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1h { z0.h, z8.h }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i16_z0_z8:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1h { z0.h, z1.h }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 0
  %v0 = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 1
  %v1 = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> %v0, <vscale x 8 x i16> %res.v1, i64 8)
  ret <vscale x 16 x i16> %v1
}

define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x2_i16_z0_z8_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1h { z0.h, z8.h }, pn8/z, [x0, x1, lsl #1]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i16_z0_z8_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i16, ptr %ptr, i64 %index
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 0
  %v0 = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 1
  %v1 = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> %v0, <vscale x 8 x i16> %res.v1, i64 8)
  ret <vscale x 16 x i16> %v1
}

define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x2_i32_z0_z8:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1w { z0.s, z8.s }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i32_z0_z8:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1w { z0.s, z1.s }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 0
  %v0 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 1
  %v1 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> %v0, <vscale x 4 x i32> %res.v1, i64 4)
  ret <vscale x 8 x i32> %v1
}

define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x2_i32_z0_z8_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1w { z0.s, z8.s }, pn8/z, [x0, x1, lsl #2]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i32_z0_z8_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i32, ptr %ptr, i64 %index
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 0
  %v0 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 1
  %v1 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> %v0, <vscale x 4 x i32> %res.v1, i64 4)
  ret <vscale x 8 x i32> %v1
}

define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x2_i64_z0_z8:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1d { z0.d, z8.d }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i64_z0_z8:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1d { z0.d, z1.d }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 0
  %v0 = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 1
  %v1 = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> %v0, <vscale x 2 x i64> %res.v1, i64 2)
  ret <vscale x 4 x i64> %v1
}

define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x2_i64_z0_z8_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1d { z0.d, z8.d }, pn8/z, [x0, x1, lsl #3]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z8.d
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x2_i64_z0_z8_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-16
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-2
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i64, ptr %ptr, i64 %index
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z4},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z12},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 0
  %v0 = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 1
  %v1 = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> %v0, <vscale x 2 x i64> %res.v1, i64 2)
  ret <vscale x 4 x i64> %v1
}
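
; The x4 variants below load four vectors per intrinsic call: the strided form
; writes a tuple such as { z0.b, z4.b, z8.b, z12.b }, while the contiguous
; form writes consecutive registers { z0.b - z3.b }.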

define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x4_i8_z0_z4_z8_z12:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i8_z0_z4_z8_z12:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1b { z0.b - z3.b }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 0
  %v0 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 1
  %v1 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %res.v1, i64 16)
  %res.v2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 2
  %v2 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v1, <vscale x 16 x i8> %res.v2, i64 32)
  %res.v3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 3
  %v3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v2, <vscale x 16 x i8> %res.v3, i64 48)
  ret <vscale x 64 x i8> %v3
}
757 define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
758 ; STRIDED-LABEL: ldnt1_x4_i8_z0_z4_z8_z12_scalar:
760 ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
761 ; STRIDED-NEXT: addvl sp, sp, #-17
762 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
763 ; STRIDED-NEXT: ptrue pn8.b
764 ; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
765 ; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
766 ; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
767 ; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
768 ; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
769 ; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
770 ; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
771 ; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
772 ; STRIDED-NEXT: mov p8.b, p0.b
773 ; STRIDED-NEXT: ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0, x1]
774 ; STRIDED-NEXT: //APP
776 ; STRIDED-NEXT: //NO_APP
777 ; STRIDED-NEXT: ptrue pn8.b
778 ; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
779 ; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
780 ; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
781 ; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
782 ; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
783 ; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
784 ; STRIDED-NEXT: mov z2.d, z8.d
785 ; STRIDED-NEXT: mov z3.d, z12.d
786 ; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
787 ; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
788 ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
789 ; STRIDED-NEXT: mov z1.d, z4.d
790 ; STRIDED-NEXT: addvl sp, sp, #17
791 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-LABEL: ldnt1_x4_i8_z0_z4_z8_z12_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1b { z0.b - z3.b }, pn8/z, [x0, x1]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i8, ptr %ptr, i64 %index
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 0
  %v0 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 1
  %v1 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %res.v1, i64 16)
  %res.v2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 2
  %v2 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v1, <vscale x 16 x i8> %res.v2, i64 32)
  %res.v3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res, 3
  %v3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v2, <vscale x 16 x i8> %res.v3, i64 48)
  ret <vscale x 64 x i8> %v3
}

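; 16-bit element, x4 variant. STRIDED expects a strided ldnt1h into
; { z0.h, z4.h, z8.h, z12.h }; CONTIGUOUS expects a contiguous ldnt1h into
; { z0.h - z3.h }, with the results preserved via the stack around the inline asm.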
define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x4_i16_z0_z4_z8_z12:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1h { z0.h, z4.h, z8.h, z12.h }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i16_z0_z4_z8_z12:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1h { z0.h - z3.h }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 0
  %v0 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> poison, <vscale x 8 x i16> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 1
  %v1 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v0, <vscale x 8 x i16> %res.v1, i64 8)
  %res.v2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 2
  %v2 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v1, <vscale x 8 x i16> %res.v2, i64 16)
  %res.v3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 3
  %v3 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v2, <vscale x 8 x i16> %res.v3, i64 24)
  ret <vscale x 32 x i16> %v3
}

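; As above, but addressed with a scalar index: both forms use the
; [x0, x1, lsl #1] register-offset addressing mode.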
define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x4_i16_z0_z4_z8_z12_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1h { z0.h, z4.h, z8.h, z12.h }, pn8/z, [x0, x1, lsl #1]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i16_z0_z4_z8_z12_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i16, ptr %ptr, i64 %index
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 0
  %v0 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> poison, <vscale x 8 x i16> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 1
  %v1 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v0, <vscale x 8 x i16> %res.v1, i64 8)
  %res.v2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 2
  %v2 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v1, <vscale x 8 x i16> %res.v2, i64 16)
  %res.v3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res, 3
  %v3 = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> %v2, <vscale x 8 x i16> %res.v3, i64 24)
  ret <vscale x 32 x i16> %v3
}

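; 32-bit element, x4 variant: ldnt1w into { z0.s, z4.s, z8.s, z12.s }
; (STRIDED) or { z0.s - z3.s } (CONTIGUOUS).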
define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x4_i32_z0_z4_z8_z12:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i32_z0_z4_z8_z12:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1w { z0.s - z3.s }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 0
  %v0 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> poison, <vscale x 4 x i32> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 1
  %v1 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v0, <vscale x 4 x i32> %res.v1, i64 4)
  %res.v2 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 2
  %v2 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %res.v2, i64 8)
  %res.v3 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 3
  %v3 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v2, <vscale x 4 x i32> %res.v3, i64 12)
  ret <vscale x 16 x i32> %v3
}

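; As above, with a scalar index: [x0, x1, lsl #2].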
define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x4_i32_z0_z4_z8_z12_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0, x1, lsl #2]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i32_z0_z4_z8_z12_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i32, ptr %ptr, i64 %index
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 0
  %v0 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> poison, <vscale x 4 x i32> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 1
  %v1 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v0, <vscale x 4 x i32> %res.v1, i64 4)
  %res.v2 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 2
  %v2 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %res.v2, i64 8)
  %res.v3 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res, 3
  %v3 = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v2, <vscale x 4 x i32> %res.v3, i64 12)
  ret <vscale x 16 x i32> %v3
}

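; 64-bit element, x4 variant: ldnt1d into { z0.d, z4.d, z8.d, z12.d }
; (STRIDED) or { z0.d - z3.d } (CONTIGUOUS).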
define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x4_i64_z0_z4_z8_z12:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1d { z0.d, z4.d, z8.d, z12.d }, pn8/z, [x0]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i64_z0_z4_z8_z12:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1d { z0.d - z3.d }, pn8/z, [x0]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %ptr)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 0
  %v0 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> poison, <vscale x 2 x i64> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 1
  %v1 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v0, <vscale x 2 x i64> %res.v1, i64 2)
  %res.v2 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 2
  %v2 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v1, <vscale x 2 x i64> %res.v2, i64 4)
  %res.v3 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 3
  %v3 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v2, <vscale x 2 x i64> %res.v3, i64 6)
  ret <vscale x 8 x i64> %v3
}

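; As above, with a scalar index: [x0, x1, lsl #3].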
define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %z1, target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
; STRIDED-LABEL: ldnt1_x4_i64_z0_z4_z8_z12_scalar:
; STRIDED: // %bb.0:
; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; STRIDED-NEXT: addvl sp, sp, #-17
; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill
; STRIDED-NEXT: mov p8.b, p0.b
; STRIDED-NEXT: ldnt1d { z0.d, z4.d, z8.d, z12.d }, pn8/z, [x0, x1, lsl #3]
; STRIDED-NEXT: //APP
; STRIDED-NEXT: nop
; STRIDED-NEXT: //NO_APP
; STRIDED-NEXT: ptrue pn8.b
; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: mov z2.d, z8.d
; STRIDED-NEXT: mov z3.d, z12.d
; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload
; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; STRIDED-NEXT: mov z1.d, z4.d
; STRIDED-NEXT: addvl sp, sp, #17
; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; STRIDED-NEXT: ret
;
; CONTIGUOUS-LABEL: ldnt1_x4_i64_z0_z4_z8_z12_scalar:
; CONTIGUOUS: // %bb.0:
; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-15
; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill
; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CONTIGUOUS-NEXT: addvl sp, sp, #-4
; CONTIGUOUS-NEXT: mov p8.b, p0.b
; CONTIGUOUS-NEXT: ldnt1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
; CONTIGUOUS-NEXT: str z0, [sp]
; CONTIGUOUS-NEXT: str z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: str z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: str z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: //APP
; CONTIGUOUS-NEXT: nop
; CONTIGUOUS-NEXT: //NO_APP
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
; CONTIGUOUS-NEXT: ptrue pn8.b
; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload
; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
  %base = getelementptr i64, ptr %ptr, i64 %index
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base)
  call void asm sideeffect "nop", "~{z1},~{z2},~{z3},~{z5},~{z6},~{z7},~{z9},~{z10},~{z11},~{z13},~{z14},~{z15},~{z16},~{z17},~{z18},~{z19},~{z20},~{z21},~{z22},~{z23},~{z24},~{z25},~{z26},~{z27},~{z28},~{z29},~{z30},~{z31}"() nounwind
  %res.v0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 0
  %v0 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> poison, <vscale x 2 x i64> %res.v0, i64 0)
  %res.v1 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 1
  %v1 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v0, <vscale x 2 x i64> %res.v1, i64 2)
  %res.v2 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 2
  %v2 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v1, <vscale x 2 x i64> %res.v2, i64 4)
  %res.v3 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res, 3
  %v3 = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> %v2, <vscale x 2 x i64> %res.v3, i64 6)
  ret <vscale x 8 x i64> %v3
}

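; Declarations for the @llvm.vector.insert helpers and the
; @llvm.aarch64.sve.ldnt1.pn.{x2,x4} intrinsics exercised by the tests above.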
declare <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8>, <vscale x 16 x i8>, i64)
declare <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64>, <vscale x 2 x i64>, i64)
declare <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8>, <vscale x 16 x i8>, i64)
declare <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64>, <vscale x 2 x i64>, i64)
declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv16i8(target("aarch64.svcount"), ptr)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8i16(target("aarch64.svcount"), ptr)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount"), ptr)
declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2i64(target("aarch64.svcount"), ptr)
declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv16i8(target("aarch64.svcount"), ptr)
declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8i16(target("aarch64.svcount"), ptr)
declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4i32(target("aarch64.svcount"), ptr)
declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount"), ptr)