1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -mve-max-interleave-factor=4 -verify-machineinstrs %s -o - | FileCheck %s
; vld4-style deinterleave of <8 x i32> into 4 x <2 x i32>, summed into %dst.
; The result vectors are only 64 bits wide, so no MVE VLD4 is formed; the
; CHECK lines expect two plain vldrw loads plus scalar lane moves and adds.
6 define void @vld4_v2i32(<8 x i32> *%src, <2 x i32> *%dst) {
7 ; CHECK-LABEL: vld4_v2i32:
8 ; CHECK: @ %bb.0: @ %entry
9 ; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
10 ; CHECK-NEXT: vldrw.u32 q0, [r0]
11 ; CHECK-NEXT: vmov.f32 s10, s7
12 ; CHECK-NEXT: vmov r2, s6
13 ; CHECK-NEXT: vmov.f32 s6, s5
14 ; CHECK-NEXT: vmov r3, s4
15 ; CHECK-NEXT: vmov.f32 s8, s3
16 ; CHECK-NEXT: vmov.f32 s12, s1
17 ; CHECK-NEXT: vmov r0, s10
18 ; CHECK-NEXT: add r0, r2
19 ; CHECK-NEXT: vmov r2, s6
20 ; CHECK-NEXT: add r2, r3
21 ; CHECK-NEXT: vmov r3, s2
22 ; CHECK-NEXT: add.w r12, r2, r0
23 ; CHECK-NEXT: vmov r2, s8
24 ; CHECK-NEXT: vmov r0, s0
25 ; CHECK-NEXT: add r2, r3
26 ; CHECK-NEXT: vmov r3, s12
27 ; CHECK-NEXT: add r0, r3
28 ; CHECK-NEXT: add r0, r2
29 ; CHECK-NEXT: strd r0, r12, [r1]
; The four shuffles pick lanes {0,4}, {1,5}, {2,6}, {3,7}: a stride-4
; deinterleave of the loaded vector, then all four parts are added.
32 %l1 = load <8 x i32>, <8 x i32>* %src, align 4
33 %s1 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 0, i32 4>
34 %s2 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 1, i32 5>
35 %s3 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 2, i32 6>
36 %s4 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 3, i32 7>
37 %a1 = add <2 x i32> %s1, %s2
38 %a2 = add <2 x i32> %s3, %s4
39 %a3 = add <2 x i32> %a1, %a2
40 store <2 x i32> %a3, <2 x i32> *%dst
; The natural MVE width: <16 x i32> deinterleaved into 4 x <4 x i32>.  The
; CHECK lines expect a single vld40/vld41/vld42/vld43 group followed by
; three vector adds — the canonical interleaved-load lowering.
44 define void @vld4_v4i32(<16 x i32> *%src, <4 x i32> *%dst) {
45 ; CHECK-LABEL: vld4_v4i32:
46 ; CHECK: @ %bb.0: @ %entry
47 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
48 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
49 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
50 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
51 ; CHECK-NEXT: vadd.i32 q2, q2, q3
52 ; CHECK-NEXT: vadd.i32 q0, q0, q1
53 ; CHECK-NEXT: vadd.i32 q0, q0, q2
54 ; CHECK-NEXT: vstrw.32 q0, [r1]
57 %l1 = load <16 x i32>, <16 x i32>* %src, align 4
58 %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
59 %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
60 %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
61 %s4 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
62 %a1 = add <4 x i32> %s1, %s2
63 %a2 = add <4 x i32> %s3, %s4
64 %a3 = add <4 x i32> %a1, %a2
65 store <4 x i32> %a3, <4 x i32> *%dst
; Double width: <32 x i32> deinterleaved into 4 x <8 x i32>.  Expected to
; split into two VLD4 groups, the first using the post-increment form
; ([r0]!) to advance the base pointer to the second half.
69 define void @vld4_v8i32(<32 x i32> *%src, <8 x i32> *%dst) {
70 ; CHECK-LABEL: vld4_v8i32:
71 ; CHECK: @ %bb.0: @ %entry
72 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
73 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
74 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
75 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
76 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
77 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]!
78 ; CHECK-NEXT: vadd.i32 q4, q2, q3
79 ; CHECK-NEXT: vadd.i32 q5, q0, q1
80 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
81 ; CHECK-NEXT: vadd.i32 q4, q5, q4
82 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
83 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
84 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
85 ; CHECK-NEXT: vstrw.32 q4, [r1]
86 ; CHECK-NEXT: vadd.i32 q2, q2, q3
87 ; CHECK-NEXT: vadd.i32 q0, q0, q1
88 ; CHECK-NEXT: vadd.i32 q0, q0, q2
89 ; CHECK-NEXT: vstrw.32 q0, [r1, #16]
90 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
93 %l1 = load <32 x i32>, <32 x i32>* %src, align 4
94 %s1 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
95 %s2 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
96 %s3 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
97 %s4 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
98 %a1 = add <8 x i32> %s1, %s2
99 %a2 = add <8 x i32> %s3, %s4
100 %a3 = add <8 x i32> %a1, %a2
101 store <8 x i32> %a3, <8 x i32> *%dst
; Quadruple width: <64 x i32> into 4 x <16 x i32>.  Expected to become four
; VLD4 groups with extra base-address arithmetic (r0+128, r0+192) plus two
; 16-byte stack spills/reloads of intermediate sums due to register pressure.
105 define void @vld4_v16i32(<64 x i32> *%src, <16 x i32> *%dst) {
106 ; CHECK-LABEL: vld4_v16i32:
107 ; CHECK: @ %bb.0: @ %entry
108 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
109 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
110 ; CHECK-NEXT: .pad #32
111 ; CHECK-NEXT: sub sp, #32
112 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
113 ; CHECK-NEXT: mov r2, r0
114 ; CHECK-NEXT: add.w r3, r0, #192
115 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
116 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
117 ; CHECK-NEXT: adds r0, #128
118 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r2]!
119 ; CHECK-NEXT: vadd.i32 q2, q2, q3
120 ; CHECK-NEXT: vld40.32 {q3, q4, q5, q6}, [r3]
121 ; CHECK-NEXT: vadd.i32 q0, q0, q1
122 ; CHECK-NEXT: vld41.32 {q3, q4, q5, q6}, [r3]
123 ; CHECK-NEXT: vadd.i32 q0, q0, q2
124 ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
125 ; CHECK-NEXT: vld42.32 {q3, q4, q5, q6}, [r3]
126 ; CHECK-NEXT: vld43.32 {q3, q4, q5, q6}, [r3]
127 ; CHECK-NEXT: vadd.i32 q1, q5, q6
128 ; CHECK-NEXT: vadd.i32 q2, q3, q4
129 ; CHECK-NEXT: vadd.i32 q0, q2, q1
130 ; CHECK-NEXT: vld40.32 {q4, q5, q6, q7}, [r2]
131 ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
132 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
133 ; CHECK-NEXT: vld41.32 {q4, q5, q6, q7}, [r2]
134 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
135 ; CHECK-NEXT: vld42.32 {q4, q5, q6, q7}, [r2]
136 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
137 ; CHECK-NEXT: vld43.32 {q4, q5, q6, q7}, [r2]
138 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
139 ; CHECK-NEXT: vadd.i32 q0, q0, q1
140 ; CHECK-NEXT: vadd.i32 q2, q2, q3
141 ; CHECK-NEXT: vadd.i32 q0, q0, q2
142 ; CHECK-NEXT: vadd.i32 q1, q6, q7
143 ; CHECK-NEXT: vadd.i32 q2, q4, q5
144 ; CHECK-NEXT: vstrw.32 q0, [r1, #32]
145 ; CHECK-NEXT: vadd.i32 q1, q2, q1
146 ; CHECK-NEXT: vldrw.u32 q2, [sp] @ 16-byte Reload
147 ; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
148 ; CHECK-NEXT: vstrw.32 q1, [r1, #16]
149 ; CHECK-NEXT: vstrw.32 q2, [r1, #48]
150 ; CHECK-NEXT: vstrw.32 q0, [r1]
151 ; CHECK-NEXT: add sp, #32
152 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
155 %l1 = load <64 x i32>, <64 x i32>* %src, align 4
156 %s1 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
157 %s2 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
158 %s3 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
159 %s4 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
160 %a1 = add <16 x i32> %s1, %s2
161 %a2 = add <16 x i32> %s3, %s4
162 %a3 = add <16 x i32> %a1, %a2
163 store <16 x i32> %a3, <16 x i32> *%dst
; Same shape as vld4_v4i32 but with align 1 on the load.  No VLD4 is formed
; here: the CHECK lines expect four unaligned byte loads (vldrb.u8) and the
; deinterleave done with vmov.f32 lane shuffles before the adds.
167 define void @vld4_v4i32_align1(<16 x i32> *%src, <4 x i32> *%dst) {
168 ; CHECK-LABEL: vld4_v4i32_align1:
169 ; CHECK: @ %bb.0: @ %entry
170 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
171 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
172 ; CHECK-NEXT: vldrb.u8 q0, [r0, #48]
173 ; CHECK-NEXT: vldrb.u8 q1, [r0, #32]
174 ; CHECK-NEXT: vldrb.u8 q3, [r0, #16]
175 ; CHECK-NEXT: vldrb.u8 q2, [r0]
176 ; CHECK-NEXT: vmov.f32 s18, s7
177 ; CHECK-NEXT: vmov.f32 s16, s11
178 ; CHECK-NEXT: vmov.f32 s20, s10
179 ; CHECK-NEXT: vmov.f32 s17, s15
180 ; CHECK-NEXT: vmov.f32 s19, s3
181 ; CHECK-NEXT: vmov.f32 s21, s14
182 ; CHECK-NEXT: vmov.f32 s22, s6
183 ; CHECK-NEXT: vmov.f32 s23, s2
184 ; CHECK-NEXT: vadd.i32 q4, q5, q4
185 ; CHECK-NEXT: vmov.f32 s20, s9
186 ; CHECK-NEXT: vmov.f32 s21, s13
187 ; CHECK-NEXT: vmov.f32 s22, s5
188 ; CHECK-NEXT: vmov.f32 s23, s1
189 ; CHECK-NEXT: vmov.f32 s9, s12
190 ; CHECK-NEXT: vmov.f32 s10, s4
191 ; CHECK-NEXT: vmov.f32 s11, s0
192 ; CHECK-NEXT: vadd.i32 q0, q2, q5
193 ; CHECK-NEXT: vadd.i32 q0, q0, q4
194 ; CHECK-NEXT: vstrw.32 q0, [r1]
195 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
198 %l1 = load <16 x i32>, <16 x i32>* %src, align 1
199 %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
200 %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
201 %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
202 %s4 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
203 %a1 = add <4 x i32> %s1, %s2
204 %a2 = add <4 x i32> %s3, %s4
205 %a3 = add <4 x i32> %a1, %a2
206 store <4 x i32> %a3, <4 x i32> *%dst
; i16 variant, minimum width: <8 x i16> into 4 x <2 x i16>.  Too narrow for
; VLD4; expected codegen is one vector load, per-lane vmov.u16 extracts,
; scalar adds, and two strh stores of the truncated results.
212 define void @vld4_v2i16(<8 x i16> *%src, <2 x i16> *%dst) {
213 ; CHECK-LABEL: vld4_v2i16:
214 ; CHECK: @ %bb.0: @ %entry
215 ; CHECK-NEXT: vldrh.u16 q0, [r0]
216 ; CHECK-NEXT: vmov.u16 r0, q0[7]
217 ; CHECK-NEXT: vmov.u16 r2, q0[6]
218 ; CHECK-NEXT: add r0, r2
219 ; CHECK-NEXT: vmov.u16 r2, q0[5]
220 ; CHECK-NEXT: vmov.u16 r3, q0[4]
221 ; CHECK-NEXT: add r2, r3
222 ; CHECK-NEXT: vmov.u16 r3, q0[0]
223 ; CHECK-NEXT: add r0, r2
224 ; CHECK-NEXT: strh r0, [r1, #2]
225 ; CHECK-NEXT: vmov.u16 r0, q0[3]
226 ; CHECK-NEXT: vmov.u16 r2, q0[2]
227 ; CHECK-NEXT: add r0, r2
228 ; CHECK-NEXT: vmov.u16 r2, q0[1]
229 ; CHECK-NEXT: add r2, r3
230 ; CHECK-NEXT: add r0, r2
231 ; CHECK-NEXT: strh r0, [r1]
234 %l1 = load <8 x i16>, <8 x i16>* %src, align 2
235 %s1 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 0, i32 4>
236 %s2 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 1, i32 5>
237 %s3 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 2, i32 6>
238 %s4 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 3, i32 7>
239 %a1 = add <2 x i16> %s1, %s2
240 %a2 = add <2 x i16> %s3, %s4
241 %a3 = add <2 x i16> %a1, %a2
242 store <2 x i16> %a3, <2 x i16> *%dst
; <16 x i16> into 4 x <4 x i16>.  Still below VLD4 width for i16 (needs 8
; lanes); expected to assemble <4 x i32>-sized groups via vmov q[n],q[m]
; lane inserts, add as i32, and store with a truncating vstrh.32.
246 define void @vld4_v4i16(<16 x i16> *%src, <4 x i16> *%dst) {
247 ; CHECK-LABEL: vld4_v4i16:
248 ; CHECK: @ %bb.0: @ %entry
249 ; CHECK-NEXT: .vsave {d8, d9}
250 ; CHECK-NEXT: vpush {d8, d9}
251 ; CHECK-NEXT: vldrh.u16 q0, [r0, #16]
252 ; CHECK-NEXT: vldrh.u16 q1, [r0]
253 ; CHECK-NEXT: vmov.u16 r2, q0[3]
254 ; CHECK-NEXT: vmov.u16 r0, q1[3]
255 ; CHECK-NEXT: vmov q2[2], q2[0], r0, r2
256 ; CHECK-NEXT: vmov.u16 r0, q0[7]
257 ; CHECK-NEXT: vmov.u16 r2, q1[7]
258 ; CHECK-NEXT: vmov q2[3], q2[1], r2, r0
259 ; CHECK-NEXT: vmov.u16 r0, q0[2]
260 ; CHECK-NEXT: vmov.u16 r2, q1[2]
261 ; CHECK-NEXT: vmov q3[2], q3[0], r2, r0
262 ; CHECK-NEXT: vmov.u16 r0, q0[6]
263 ; CHECK-NEXT: vmov.u16 r2, q1[6]
264 ; CHECK-NEXT: vmov q3[3], q3[1], r2, r0
265 ; CHECK-NEXT: vmov.u16 r0, q0[0]
266 ; CHECK-NEXT: vmov.u16 r2, q1[0]
267 ; CHECK-NEXT: vadd.i32 q2, q3, q2
268 ; CHECK-NEXT: vmov q3[2], q3[0], r2, r0
269 ; CHECK-NEXT: vmov.u16 r0, q0[1]
270 ; CHECK-NEXT: vmov.u16 r2, q1[1]
271 ; CHECK-NEXT: vmov q4[2], q4[0], r2, r0
272 ; CHECK-NEXT: vmov.u16 r0, q0[5]
273 ; CHECK-NEXT: vmov.u16 r2, q1[5]
274 ; CHECK-NEXT: vmov q4[3], q4[1], r2, r0
275 ; CHECK-NEXT: vmov.u16 r0, q0[4]
276 ; CHECK-NEXT: vmov.u16 r2, q1[4]
277 ; CHECK-NEXT: vmov q3[3], q3[1], r2, r0
278 ; CHECK-NEXT: vadd.i32 q0, q3, q4
279 ; CHECK-NEXT: vadd.i32 q0, q0, q2
280 ; CHECK-NEXT: vstrh.32 q0, [r1]
281 ; CHECK-NEXT: vpop {d8, d9}
284 %l1 = load <16 x i16>, <16 x i16>* %src, align 2
285 %s1 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
286 %s2 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
287 %s3 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
288 %s4 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
289 %a1 = add <4 x i16> %s1, %s2
290 %a2 = add <4 x i16> %s3, %s4
291 %a3 = add <4 x i16> %a1, %a2
292 store <4 x i16> %a3, <4 x i16> *%dst
; Natural i16 VLD4 width: <32 x i16> into 4 x <8 x i16>.  Expects a single
; vld4{0-3}.16 group plus three vadd.i16.
296 define void @vld4_v8i16(<32 x i16> *%src, <8 x i16> *%dst) {
297 ; CHECK-LABEL: vld4_v8i16:
298 ; CHECK: @ %bb.0: @ %entry
299 ; CHECK-NEXT: vld40.16 {q0, q1, q2, q3}, [r0]
300 ; CHECK-NEXT: vld41.16 {q0, q1, q2, q3}, [r0]
301 ; CHECK-NEXT: vld42.16 {q0, q1, q2, q3}, [r0]
302 ; CHECK-NEXT: vld43.16 {q0, q1, q2, q3}, [r0]
303 ; CHECK-NEXT: vadd.i16 q2, q2, q3
304 ; CHECK-NEXT: vadd.i16 q0, q0, q1
305 ; CHECK-NEXT: vadd.i16 q0, q0, q2
306 ; CHECK-NEXT: vstrw.32 q0, [r1]
309 %l1 = load <32 x i16>, <32 x i16>* %src, align 2
310 %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
311 %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
312 %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
313 %s4 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
314 %a1 = add <8 x i16> %s1, %s2
315 %a2 = add <8 x i16> %s3, %s4
316 %a3 = add <8 x i16> %a1, %a2
317 store <8 x i16> %a3, <8 x i16> *%dst
; Double i16 width: <64 x i16> into 4 x <16 x i16>.  Expected to split into
; two vld4.16 groups, the first with post-increment ([r0]!), mirroring the
; vld4_v8i32 structure.
321 define void @vld4_v16i16(<64 x i16> *%src, <16 x i16> *%dst) {
322 ; CHECK-LABEL: vld4_v16i16:
323 ; CHECK: @ %bb.0: @ %entry
324 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
325 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
326 ; CHECK-NEXT: vld40.16 {q0, q1, q2, q3}, [r0]
327 ; CHECK-NEXT: vld41.16 {q0, q1, q2, q3}, [r0]
328 ; CHECK-NEXT: vld42.16 {q0, q1, q2, q3}, [r0]
329 ; CHECK-NEXT: vld43.16 {q0, q1, q2, q3}, [r0]!
330 ; CHECK-NEXT: vadd.i16 q4, q2, q3
331 ; CHECK-NEXT: vadd.i16 q5, q0, q1
332 ; CHECK-NEXT: vld40.16 {q0, q1, q2, q3}, [r0]
333 ; CHECK-NEXT: vadd.i16 q4, q5, q4
334 ; CHECK-NEXT: vld41.16 {q0, q1, q2, q3}, [r0]
335 ; CHECK-NEXT: vld42.16 {q0, q1, q2, q3}, [r0]
336 ; CHECK-NEXT: vld43.16 {q0, q1, q2, q3}, [r0]
337 ; CHECK-NEXT: vstrw.32 q4, [r1]
338 ; CHECK-NEXT: vadd.i16 q2, q2, q3
339 ; CHECK-NEXT: vadd.i16 q0, q0, q1
340 ; CHECK-NEXT: vadd.i16 q0, q0, q2
341 ; CHECK-NEXT: vstrw.32 q0, [r1, #16]
342 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
345 %l1 = load <64 x i16>, <64 x i16>* %src, align 2
346 %s1 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
347 %s2 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
348 %s3 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
349 %s4 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
350 %a1 = add <16 x i16> %s1, %s2
351 %a2 = add <16 x i16> %s3, %s4
352 %a3 = add <16 x i16> %a1, %a2
353 store <16 x i16> %a3, <16 x i16> *%dst
; i16 vld4 with align 1: no VLD4 formed.  Expects unaligned byte loads
; (vldrb.u8) and the deinterleave built from f16 lane ops (vmovx.f16 to
; extract odd lanes, vins.f16 to pair them) before the vadd.i16s.
357 define void @vld4_v8i16_align1(<32 x i16> *%src, <8 x i16> *%dst) {
358 ; CHECK-LABEL: vld4_v8i16_align1:
359 ; CHECK: @ %bb.0: @ %entry
360 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
361 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
362 ; CHECK-NEXT: vldrb.u8 q1, [r0, #32]
363 ; CHECK-NEXT: vldrb.u8 q2, [r0, #48]
364 ; CHECK-NEXT: vmovx.f16 s18, s5
365 ; CHECK-NEXT: vmovx.f16 s0, s7
366 ; CHECK-NEXT: vins.f16 s18, s0
367 ; CHECK-NEXT: vmovx.f16 s19, s9
368 ; CHECK-NEXT: vmovx.f16 s0, s11
369 ; CHECK-NEXT: vins.f16 s5, s7
370 ; CHECK-NEXT: vins.f16 s19, s0
371 ; CHECK-NEXT: vldrb.u8 q0, [r0]
372 ; CHECK-NEXT: vins.f16 s9, s11
373 ; CHECK-NEXT: vmov.f32 s22, s5
374 ; CHECK-NEXT: vmovx.f16 s16, s1
375 ; CHECK-NEXT: vmovx.f16 s12, s3
376 ; CHECK-NEXT: vins.f16 s16, s12
377 ; CHECK-NEXT: vldrb.u8 q3, [r0, #16]
378 ; CHECK-NEXT: vins.f16 s1, s3
379 ; CHECK-NEXT: vmov.f32 s23, s9
380 ; CHECK-NEXT: vmovx.f16 s17, s13
381 ; CHECK-NEXT: vmovx.f16 s20, s15
382 ; CHECK-NEXT: vins.f16 s13, s15
383 ; CHECK-NEXT: vins.f16 s17, s20
384 ; CHECK-NEXT: vmov.f32 s20, s1
385 ; CHECK-NEXT: vmovx.f16 s1, s6
386 ; CHECK-NEXT: vmov.f32 s21, s13
387 ; CHECK-NEXT: vadd.i16 q4, q5, q4
388 ; CHECK-NEXT: vmovx.f16 s22, s4
389 ; CHECK-NEXT: vins.f16 s22, s1
390 ; CHECK-NEXT: vmovx.f16 s23, s8
391 ; CHECK-NEXT: vmovx.f16 s1, s10
392 ; CHECK-NEXT: vmovx.f16 s20, s0
393 ; CHECK-NEXT: vins.f16 s23, s1
394 ; CHECK-NEXT: vmovx.f16 s1, s2
395 ; CHECK-NEXT: vins.f16 s20, s1
396 ; CHECK-NEXT: vmovx.f16 s21, s12
397 ; CHECK-NEXT: vmovx.f16 s1, s14
398 ; CHECK-NEXT: vins.f16 s8, s10
399 ; CHECK-NEXT: vins.f16 s4, s6
400 ; CHECK-NEXT: vins.f16 s12, s14
401 ; CHECK-NEXT: vins.f16 s21, s1
402 ; CHECK-NEXT: vins.f16 s0, s2
403 ; CHECK-NEXT: vmov.f32 s3, s8
404 ; CHECK-NEXT: vmov.f32 s1, s12
405 ; CHECK-NEXT: vmov.f32 s2, s4
406 ; CHECK-NEXT: vadd.i16 q0, q0, q5
407 ; CHECK-NEXT: vadd.i16 q0, q0, q4
408 ; CHECK-NEXT: vstrw.32 q0, [r1]
409 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
412 %l1 = load <32 x i16>, <32 x i16>* %src, align 1
413 %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
414 %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
415 %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
416 %s4 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
417 %a1 = add <8 x i16> %s1, %s2
418 %a2 = add <8 x i16> %s3, %s4
419 %a3 = add <8 x i16> %a1, %a2
420 store <8 x i16> %a3, <8 x i16> *%dst
; i8 variant, minimum width: <8 x i8> into 4 x <2 x i8>.  Expects a widening
; vldrb.u16 load, scalar lane extracts/adds, and two strb stores.
426 define void @vld4_v2i8(<8 x i8> *%src, <2 x i8> *%dst) {
427 ; CHECK-LABEL: vld4_v2i8:
428 ; CHECK: @ %bb.0: @ %entry
429 ; CHECK-NEXT: vldrb.u16 q0, [r0]
430 ; CHECK-NEXT: vmov.u16 r0, q0[7]
431 ; CHECK-NEXT: vmov.u16 r2, q0[6]
432 ; CHECK-NEXT: add r0, r2
433 ; CHECK-NEXT: vmov.u16 r2, q0[5]
434 ; CHECK-NEXT: vmov.u16 r3, q0[4]
435 ; CHECK-NEXT: add r2, r3
436 ; CHECK-NEXT: vmov.u16 r3, q0[0]
437 ; CHECK-NEXT: add r0, r2
438 ; CHECK-NEXT: strb r0, [r1, #1]
439 ; CHECK-NEXT: vmov.u16 r0, q0[3]
440 ; CHECK-NEXT: vmov.u16 r2, q0[2]
441 ; CHECK-NEXT: add r0, r2
442 ; CHECK-NEXT: vmov.u16 r2, q0[1]
443 ; CHECK-NEXT: add r2, r3
444 ; CHECK-NEXT: add r0, r2
445 ; CHECK-NEXT: strb r0, [r1]
448 %l1 = load <8 x i8>, <8 x i8>* %src, align 1
449 %s1 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 0, i32 4>
450 %s2 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 1, i32 5>
451 %s3 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 2, i32 6>
452 %s4 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 3, i32 7>
453 %a1 = add <2 x i8> %s1, %s2
454 %a2 = add <2 x i8> %s3, %s4
455 %a3 = add <2 x i8> %a1, %a2
456 store <2 x i8> %a3, <2 x i8> *%dst
; <16 x i8> into 4 x <4 x i8>.  Expects a compact lowering that reuses
; vrev32.8/vrev16.8 to pair adjacent bytes for two of the partial sums,
; with only one group assembled via explicit lane moves.
460 define void @vld4_v4i8(<16 x i8> *%src, <4 x i8> *%dst) {
461 ; CHECK-LABEL: vld4_v4i8:
462 ; CHECK: @ %bb.0: @ %entry
463 ; CHECK-NEXT: vldrb.u8 q0, [r0]
464 ; CHECK-NEXT: vmov.u8 r0, q0[10]
465 ; CHECK-NEXT: vmov.u8 r2, q0[2]
466 ; CHECK-NEXT: vmov q1[2], q1[0], r2, r0
467 ; CHECK-NEXT: vmov.u8 r0, q0[14]
468 ; CHECK-NEXT: vmov.u8 r2, q0[6]
469 ; CHECK-NEXT: vrev32.8 q2, q0
470 ; CHECK-NEXT: vmov q1[3], q1[1], r2, r0
471 ; CHECK-NEXT: vadd.i32 q1, q1, q2
472 ; CHECK-NEXT: vrev16.8 q2, q0
473 ; CHECK-NEXT: vadd.i32 q0, q0, q2
474 ; CHECK-NEXT: vadd.i32 q0, q0, q1
475 ; CHECK-NEXT: vstrb.32 q0, [r1]
478 %l1 = load <16 x i8>, <16 x i8>* %src, align 1
479 %s1 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
480 %s2 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
481 %s3 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
482 %s4 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
483 %a1 = add <4 x i8> %s1, %s2
484 %a2 = add <4 x i8> %s3, %s4
485 %a3 = add <4 x i8> %a1, %a2
486 store <4 x i8> %a3, <4 x i8> *%dst
; <32 x i8> into 4 x <8 x i8>.  Below the 16-lane i8 VLD4 width, so the
; expected codegen builds each <8 x i16>-sized group one lane at a time via
; vmov.u8 extract + vmov.16 insert, then sums with vadd.i16 and stores with
; a truncating vstrb.16.
490 define void @vld4_v8i8(<32 x i8> *%src, <8 x i8> *%dst) {
491 ; CHECK-LABEL: vld4_v8i8:
492 ; CHECK: @ %bb.0: @ %entry
493 ; CHECK-NEXT: .vsave {d8, d9}
494 ; CHECK-NEXT: vpush {d8, d9}
495 ; CHECK-NEXT: vldrb.u8 q1, [r0]
496 ; CHECK-NEXT: vldrb.u8 q0, [r0, #16]
497 ; CHECK-NEXT: vmov.u8 r2, q1[3]
498 ; CHECK-NEXT: vmov.u8 r0, q0[3]
499 ; CHECK-NEXT: vmov.16 q2[0], r2
500 ; CHECK-NEXT: vmov.u8 r2, q1[7]
501 ; CHECK-NEXT: vmov.16 q2[1], r2
502 ; CHECK-NEXT: vmov.u8 r2, q1[11]
503 ; CHECK-NEXT: vmov.16 q2[2], r2
504 ; CHECK-NEXT: vmov.u8 r2, q1[15]
505 ; CHECK-NEXT: vmov.16 q2[3], r2
506 ; CHECK-NEXT: vmov.16 q2[4], r0
507 ; CHECK-NEXT: vmov.u8 r0, q0[7]
508 ; CHECK-NEXT: vmov.16 q2[5], r0
509 ; CHECK-NEXT: vmov.u8 r0, q0[11]
510 ; CHECK-NEXT: vmov.16 q2[6], r0
511 ; CHECK-NEXT: vmov.u8 r0, q0[15]
512 ; CHECK-NEXT: vmov.16 q2[7], r0
513 ; CHECK-NEXT: vmov.u8 r0, q1[2]
514 ; CHECK-NEXT: vmov.16 q3[0], r0
515 ; CHECK-NEXT: vmov.u8 r0, q1[6]
516 ; CHECK-NEXT: vmov.16 q3[1], r0
517 ; CHECK-NEXT: vmov.u8 r0, q1[10]
518 ; CHECK-NEXT: vmov.16 q3[2], r0
519 ; CHECK-NEXT: vmov.u8 r0, q1[14]
520 ; CHECK-NEXT: vmov.16 q3[3], r0
521 ; CHECK-NEXT: vmov.u8 r0, q0[2]
522 ; CHECK-NEXT: vmov.16 q3[4], r0
523 ; CHECK-NEXT: vmov.u8 r0, q0[6]
524 ; CHECK-NEXT: vmov.16 q3[5], r0
525 ; CHECK-NEXT: vmov.u8 r0, q0[10]
526 ; CHECK-NEXT: vmov.16 q3[6], r0
527 ; CHECK-NEXT: vmov.u8 r0, q0[14]
528 ; CHECK-NEXT: vmov.16 q3[7], r0
529 ; CHECK-NEXT: vmov.u8 r0, q1[0]
530 ; CHECK-NEXT: vadd.i16 q2, q3, q2
531 ; CHECK-NEXT: vmov.16 q3[0], r0
532 ; CHECK-NEXT: vmov.u8 r0, q1[4]
533 ; CHECK-NEXT: vmov.16 q3[1], r0
534 ; CHECK-NEXT: vmov.u8 r0, q1[8]
535 ; CHECK-NEXT: vmov.16 q3[2], r0
536 ; CHECK-NEXT: vmov.u8 r0, q1[12]
537 ; CHECK-NEXT: vmov.16 q3[3], r0
538 ; CHECK-NEXT: vmov.u8 r0, q0[0]
539 ; CHECK-NEXT: vmov.16 q3[4], r0
540 ; CHECK-NEXT: vmov.u8 r0, q0[4]
541 ; CHECK-NEXT: vmov.16 q3[5], r0
542 ; CHECK-NEXT: vmov.u8 r0, q0[8]
543 ; CHECK-NEXT: vmov.16 q3[6], r0
544 ; CHECK-NEXT: vmov.u8 r0, q1[1]
545 ; CHECK-NEXT: vmov.16 q4[0], r0
546 ; CHECK-NEXT: vmov.u8 r0, q1[5]
547 ; CHECK-NEXT: vmov.16 q4[1], r0
548 ; CHECK-NEXT: vmov.u8 r0, q1[9]
549 ; CHECK-NEXT: vmov.16 q4[2], r0
550 ; CHECK-NEXT: vmov.u8 r0, q1[13]
551 ; CHECK-NEXT: vmov.16 q4[3], r0
552 ; CHECK-NEXT: vmov.u8 r0, q0[1]
553 ; CHECK-NEXT: vmov.16 q4[4], r0
554 ; CHECK-NEXT: vmov.u8 r0, q0[5]
555 ; CHECK-NEXT: vmov.16 q4[5], r0
556 ; CHECK-NEXT: vmov.u8 r0, q0[9]
557 ; CHECK-NEXT: vmov.16 q4[6], r0
558 ; CHECK-NEXT: vmov.u8 r0, q0[13]
559 ; CHECK-NEXT: vmov.16 q4[7], r0
560 ; CHECK-NEXT: vmov.u8 r0, q0[12]
561 ; CHECK-NEXT: vmov.16 q3[7], r0
562 ; CHECK-NEXT: vadd.i16 q0, q3, q4
563 ; CHECK-NEXT: vadd.i16 q0, q0, q2
564 ; CHECK-NEXT: vstrb.16 q0, [r1]
565 ; CHECK-NEXT: vpop {d8, d9}
568 %l1 = load <32 x i8>, <32 x i8>* %src, align 1
569 %s1 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
570 %s2 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
571 %s3 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
572 %s4 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
573 %a1 = add <8 x i8> %s1, %s2
574 %a2 = add <8 x i8> %s3, %s4
575 %a3 = add <8 x i8> %a1, %a2
576 store <8 x i8> %a3, <8 x i8> *%dst
; Natural i8 VLD4 width: <64 x i8> into 4 x <16 x i8>.  Expects a single
; vld4{0-3}.8 group plus three vadd.i8.
580 define void @vld4_v16i8(<64 x i8> *%src, <16 x i8> *%dst) {
581 ; CHECK-LABEL: vld4_v16i8:
582 ; CHECK: @ %bb.0: @ %entry
583 ; CHECK-NEXT: vld40.8 {q0, q1, q2, q3}, [r0]
584 ; CHECK-NEXT: vld41.8 {q0, q1, q2, q3}, [r0]
585 ; CHECK-NEXT: vld42.8 {q0, q1, q2, q3}, [r0]
586 ; CHECK-NEXT: vld43.8 {q0, q1, q2, q3}, [r0]
587 ; CHECK-NEXT: vadd.i8 q2, q2, q3
588 ; CHECK-NEXT: vadd.i8 q0, q0, q1
589 ; CHECK-NEXT: vadd.i8 q0, q0, q2
590 ; CHECK-NEXT: vstrw.32 q0, [r1]
593 %l1 = load <64 x i8>, <64 x i8>* %src, align 1
594 %s1 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
595 %s2 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
596 %s3 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
597 %s4 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
598 %a1 = add <16 x i8> %s1, %s2
599 %a2 = add <16 x i8> %s3, %s4
600 %a3 = add <16 x i8> %a1, %a2
601 store <16 x i8> %a3, <16 x i8> *%dst
; i64 variant: <8 x i64> into 4 x <2 x i64>.  There is no vld4.64 and the
; 64-bit adds are done in GPR pairs: expect f32 lane shuffles to gather the
; halves plus scalar adds/adc (carry-propagating) sequences.
607 define void @vld4_v2i64(<8 x i64> *%src, <2 x i64> *%dst) {
608 ; CHECK-LABEL: vld4_v2i64:
609 ; CHECK: @ %bb.0: @ %entry
610 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
611 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
612 ; CHECK-NEXT: .vsave {d8, d9}
613 ; CHECK-NEXT: vpush {d8, d9}
614 ; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
615 ; CHECK-NEXT: vldrw.u32 q2, [r0, #48]
616 ; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
617 ; CHECK-NEXT: vmov.f32 s4, s2
618 ; CHECK-NEXT: vmov.f32 s5, s3
619 ; CHECK-NEXT: vmov.f32 s2, s8
620 ; CHECK-NEXT: vmov.f32 s3, s9
621 ; CHECK-NEXT: vmov lr, r12, d5
622 ; CHECK-NEXT: vldrw.u32 q2, [r0]
623 ; CHECK-NEXT: vmov r0, r8, d9
624 ; CHECK-NEXT: vmov.f32 s12, s10
625 ; CHECK-NEXT: vmov.f32 s13, s11
626 ; CHECK-NEXT: vmov r2, r3, d1
627 ; CHECK-NEXT: vmov.f32 s2, s16
628 ; CHECK-NEXT: vmov.f32 s3, s17
629 ; CHECK-NEXT: vmov r5, r6, d1
630 ; CHECK-NEXT: adds.w r2, r2, lr
631 ; CHECK-NEXT: adc.w r3, r3, r12
632 ; CHECK-NEXT: vmov r4, r12, d2
633 ; CHECK-NEXT: adds r0, r0, r5
634 ; CHECK-NEXT: vmov r5, r7, d0
635 ; CHECK-NEXT: adc.w r6, r6, r8
636 ; CHECK-NEXT: adds r0, r0, r2
637 ; CHECK-NEXT: adc.w lr, r6, r3
638 ; CHECK-NEXT: vmov r3, r6, d6
639 ; CHECK-NEXT: adds r5, r5, r4
640 ; CHECK-NEXT: vmov r4, r2, d4
641 ; CHECK-NEXT: adc.w r7, r7, r12
642 ; CHECK-NEXT: adds r3, r3, r4
643 ; CHECK-NEXT: adcs r2, r6
644 ; CHECK-NEXT: adds r3, r3, r5
645 ; CHECK-NEXT: adcs r2, r7
646 ; CHECK-NEXT: vmov q0[2], q0[0], r3, r0
647 ; CHECK-NEXT: vmov q0[3], q0[1], r2, lr
648 ; CHECK-NEXT: vstrw.32 q0, [r1]
649 ; CHECK-NEXT: vpop {d8, d9}
650 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
652 %l1 = load <8 x i64>, <8 x i64>* %src, align 8
653 %s1 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 0, i32 4>
654 %s2 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 1, i32 5>
655 %s3 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 2, i32 6>
656 %s4 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 3, i32 7>
657 %a1 = add <2 x i64> %s1, %s2
658 %a2 = add <2 x i64> %s3, %s4
659 %a3 = add <2 x i64> %a1, %a2
660 store <2 x i64> %a3, <2 x i64> *%dst
; Larger i64 case: <16 x i64> into 4 x <4 x i64>.  Same scalar adds/adc
; strategy as vld4_v2i64 but with heavy register pressure: expects the full
; GPR save set, the full d8-d15 vsave, and a 16-byte stack spill.
664 define void @vld4_v4i64(<16 x i64> *%src, <4 x i64> *%dst) {
665 ; CHECK-LABEL: vld4_v4i64:
666 ; CHECK: @ %bb.0: @ %entry
667 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
668 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
669 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
670 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
671 ; CHECK-NEXT: .pad #16
672 ; CHECK-NEXT: sub sp, #16
673 ; CHECK-NEXT: vldrw.u32 q0, [r0]
674 ; CHECK-NEXT: vldrw.u32 q5, [r0, #48]
675 ; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
676 ; CHECK-NEXT: vldrw.u32 q6, [r0, #80]
677 ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
678 ; CHECK-NEXT: vmov.f32 s8, s2
679 ; CHECK-NEXT: vmov.f32 s9, s3
680 ; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
681 ; CHECK-NEXT: vmov.f32 s2, s20
682 ; CHECK-NEXT: vldrw.u32 q7, [r0, #112]
683 ; CHECK-NEXT: vmov.f32 s3, s21
684 ; CHECK-NEXT: vmov r3, r2, d11
685 ; CHECK-NEXT: vldrw.u32 q5, [r0, #96]
686 ; CHECK-NEXT: vmov.f32 s0, s26
687 ; CHECK-NEXT: vmov.f32 s1, s27
688 ; CHECK-NEXT: vmov lr, r12, d9
689 ; CHECK-NEXT: vmov.f32 s12, s6
690 ; CHECK-NEXT: vmov.f32 s13, s7
691 ; CHECK-NEXT: vmov r4, r5, d1
692 ; CHECK-NEXT: vmov.f32 s2, s16
693 ; CHECK-NEXT: vmov.f32 s3, s17
694 ; CHECK-NEXT: vldrw.u32 q4, [r0, #64]
695 ; CHECK-NEXT: vmov.f32 s6, s28
696 ; CHECK-NEXT: vmov.f32 s7, s29
697 ; CHECK-NEXT: vmov.f32 s10, s20
698 ; CHECK-NEXT: vmov.f32 s11, s21
699 ; CHECK-NEXT: vmov r0, r6, d1
700 ; CHECK-NEXT: adds r7, r4, r3
701 ; CHECK-NEXT: vmov r4, r8, d0
702 ; CHECK-NEXT: adcs r5, r2
703 ; CHECK-NEXT: vmov r2, r3, d12
704 ; CHECK-NEXT: vmov.f32 s0, s18
705 ; CHECK-NEXT: vmov.f32 s1, s19
706 ; CHECK-NEXT: adds.w r0, r0, lr
707 ; CHECK-NEXT: adc.w r6, r6, r12
708 ; CHECK-NEXT: adds.w lr, r0, r7
709 ; CHECK-NEXT: adc.w r12, r6, r5
710 ; CHECK-NEXT: vmov r6, r5, d0
711 ; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
712 ; CHECK-NEXT: adds r2, r2, r4
713 ; CHECK-NEXT: vmov r4, r0, d8
714 ; CHECK-NEXT: adc.w r3, r3, r8
715 ; CHECK-NEXT: adds r6, r6, r4
716 ; CHECK-NEXT: adcs r0, r5
717 ; CHECK-NEXT: adds.w r9, r6, r2
718 ; CHECK-NEXT: adc.w r8, r0, r3
719 ; CHECK-NEXT: vmov r5, r4, d15
720 ; CHECK-NEXT: vmov r3, r6, d3
721 ; CHECK-NEXT: vmov r7, r0, d5
722 ; CHECK-NEXT: adds r3, r3, r5
723 ; CHECK-NEXT: adcs r6, r4
724 ; CHECK-NEXT: vmov r5, r4, d11
725 ; CHECK-NEXT: adds r5, r5, r7
726 ; CHECK-NEXT: adcs r0, r4
727 ; CHECK-NEXT: adds r3, r3, r5
728 ; CHECK-NEXT: adc.w r10, r0, r6
729 ; CHECK-NEXT: vmov r4, r5, d4
730 ; CHECK-NEXT: vmov r6, r7, d0
731 ; CHECK-NEXT: vmov r2, r0, d2
732 ; CHECK-NEXT: vmov q1[2], q1[0], r9, r3
733 ; CHECK-NEXT: vmov q1[3], q1[1], r8, r10
734 ; CHECK-NEXT: vstrw.32 q1, [r1, #16]
735 ; CHECK-NEXT: adds r4, r4, r6
736 ; CHECK-NEXT: adcs r5, r7
737 ; CHECK-NEXT: vmov r6, r7, d6
738 ; CHECK-NEXT: adds r2, r2, r6
739 ; CHECK-NEXT: adcs r0, r7
740 ; CHECK-NEXT: adds r2, r2, r4
741 ; CHECK-NEXT: vmov q0[2], q0[0], r2, lr
742 ; CHECK-NEXT: adcs r0, r5
743 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r12
744 ; CHECK-NEXT: vstrw.32 q0, [r1]
745 ; CHECK-NEXT: add sp, #16
746 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
747 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
749 %l1 = load <16 x i64>, <16 x i64>* %src, align 8
750 %s1 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
751 %s2 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
752 %s3 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
753 %s4 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
754 %a1 = add <4 x i64> %s1, %s2
755 %a2 = add <4 x i64> %s3, %s4
756 %a3 = add <4 x i64> %a1, %a2
757 store <4 x i64> %a3, <4 x i64> *%dst
; Float variant, minimum width: <8 x float> into 4 x <2 x float> (fadd, so
; lane order of the sums matters to the test).  Too narrow for VLD4;
; expects vmov.f32 lane shuffles, vadd.f32, and a 2-register vstmia store.
763 define void @vld4_v2f32(<8 x float> *%src, <2 x float> *%dst) {
764 ; CHECK-LABEL: vld4_v2f32:
765 ; CHECK: @ %bb.0: @ %entry
766 ; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
767 ; CHECK-NEXT: vldrw.u32 q1, [r0]
768 ; CHECK-NEXT: vmov.f32 s8, s7
769 ; CHECK-NEXT: vmov.f32 s9, s3
770 ; CHECK-NEXT: vmov.f32 s12, s6
771 ; CHECK-NEXT: vmov.f32 s13, s2
772 ; CHECK-NEXT: vadd.f32 q2, q3, q2
773 ; CHECK-NEXT: vmov.f32 s12, s5
774 ; CHECK-NEXT: vmov.f32 s13, s1
775 ; CHECK-NEXT: vmov.f32 s5, s0
776 ; CHECK-NEXT: vadd.f32 q0, q1, q3
777 ; CHECK-NEXT: vadd.f32 q0, q0, q2
778 ; CHECK-NEXT: vstmia r1, {s0, s1}
781 %l1 = load <8 x float>, <8 x float>* %src, align 4
782 %s1 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 0, i32 4>
783 %s2 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 1, i32 5>
784 %s3 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 2, i32 6>
785 %s4 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 3, i32 7>
786 %a1 = fadd <2 x float> %s1, %s2
787 %a2 = fadd <2 x float> %s3, %s4
788 %a3 = fadd <2 x float> %a1, %a2
789 store <2 x float> %a3, <2 x float> *%dst
; Stride-4 deinterleave of <16 x float> with a <4 x float> result: the ideal
; case, matched to the full MVE structured-load sequence vld40.32..vld43.32.
793 define void @vld4_v4f32(<16 x float> *%src, <4 x float> *%dst) {
794 ; CHECK-LABEL: vld4_v4f32:
795 ; CHECK: @ %bb.0: @ %entry
796 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
797 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
798 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
799 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
800 ; CHECK-NEXT: vadd.f32 q2, q2, q3
801 ; CHECK-NEXT: vadd.f32 q0, q0, q1
802 ; CHECK-NEXT: vadd.f32 q0, q0, q2
803 ; CHECK-NEXT: vstrw.32 q0, [r1]
806 %l1 = load <16 x float>, <16 x float>* %src, align 4
807 %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
808 %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
809 %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
810 %s4 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
811 %a1 = fadd <4 x float> %s1, %s2
812 %a2 = fadd <4 x float> %s3, %s4
813 %a3 = fadd <4 x float> %a1, %a2
814 store <4 x float> %a3, <4 x float> *%dst
; <8 x float> result split into two vld40..vld43 groups; the first group's
; final vld43 uses post-increment ([r0]!) to step to the second 64-byte block.
818 define void @vld4_v8f32(<32 x float> *%src, <8 x float> *%dst) {
819 ; CHECK-LABEL: vld4_v8f32:
820 ; CHECK: @ %bb.0: @ %entry
821 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
822 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
823 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
824 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
825 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
826 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]!
827 ; CHECK-NEXT: vadd.f32 q4, q2, q3
828 ; CHECK-NEXT: vadd.f32 q5, q0, q1
829 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
830 ; CHECK-NEXT: vadd.f32 q4, q5, q4
831 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
832 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
833 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
834 ; CHECK-NEXT: vstrw.32 q4, [r1]
835 ; CHECK-NEXT: vadd.f32 q2, q2, q3
836 ; CHECK-NEXT: vadd.f32 q0, q0, q1
837 ; CHECK-NEXT: vadd.f32 q0, q0, q2
838 ; CHECK-NEXT: vstrw.32 q0, [r1, #16]
839 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
842 %l1 = load <32 x float>, <32 x float>* %src, align 4
843 %s1 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
844 %s2 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
845 %s3 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
846 %s4 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
847 %a1 = fadd <8 x float> %s1, %s2
848 %a2 = fadd <8 x float> %s3, %s4
849 %a3 = fadd <8 x float> %a1, %a2
850 store <8 x float> %a3, <8 x float> *%dst
; <16 x float> result: four vld40..vld43 groups at offsets 0/64/128/192, with
; high register pressure forcing two 16-byte q-register spills to the stack.
854 define void @vld4_v16f32(<64 x float> *%src, <16 x float> *%dst) {
855 ; CHECK-LABEL: vld4_v16f32:
856 ; CHECK: @ %bb.0: @ %entry
857 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
858 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
859 ; CHECK-NEXT: .pad #32
860 ; CHECK-NEXT: sub sp, #32
861 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
862 ; CHECK-NEXT: mov r2, r0
863 ; CHECK-NEXT: add.w r3, r0, #192
864 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
865 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
866 ; CHECK-NEXT: adds r0, #128
867 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r2]!
868 ; CHECK-NEXT: vadd.f32 q2, q2, q3
869 ; CHECK-NEXT: vld40.32 {q3, q4, q5, q6}, [r3]
870 ; CHECK-NEXT: vadd.f32 q0, q0, q1
871 ; CHECK-NEXT: vld41.32 {q3, q4, q5, q6}, [r3]
872 ; CHECK-NEXT: vadd.f32 q0, q0, q2
873 ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
874 ; CHECK-NEXT: vld42.32 {q3, q4, q5, q6}, [r3]
875 ; CHECK-NEXT: vld43.32 {q3, q4, q5, q6}, [r3]
876 ; CHECK-NEXT: vadd.f32 q1, q5, q6
877 ; CHECK-NEXT: vadd.f32 q2, q3, q4
878 ; CHECK-NEXT: vadd.f32 q0, q2, q1
879 ; CHECK-NEXT: vld40.32 {q4, q5, q6, q7}, [r2]
880 ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
881 ; CHECK-NEXT: vld40.32 {q0, q1, q2, q3}, [r0]
882 ; CHECK-NEXT: vld41.32 {q4, q5, q6, q7}, [r2]
883 ; CHECK-NEXT: vld41.32 {q0, q1, q2, q3}, [r0]
884 ; CHECK-NEXT: vld42.32 {q4, q5, q6, q7}, [r2]
885 ; CHECK-NEXT: vld42.32 {q0, q1, q2, q3}, [r0]
886 ; CHECK-NEXT: vld43.32 {q4, q5, q6, q7}, [r2]
887 ; CHECK-NEXT: vld43.32 {q0, q1, q2, q3}, [r0]
888 ; CHECK-NEXT: vadd.f32 q0, q0, q1
889 ; CHECK-NEXT: vadd.f32 q2, q2, q3
890 ; CHECK-NEXT: vadd.f32 q0, q0, q2
891 ; CHECK-NEXT: vadd.f32 q1, q6, q7
892 ; CHECK-NEXT: vadd.f32 q2, q4, q5
893 ; CHECK-NEXT: vstrw.32 q0, [r1, #32]
894 ; CHECK-NEXT: vadd.f32 q1, q2, q1
895 ; CHECK-NEXT: vldrw.u32 q2, [sp] @ 16-byte Reload
896 ; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
897 ; CHECK-NEXT: vstrw.32 q1, [r1, #16]
898 ; CHECK-NEXT: vstrw.32 q2, [r1, #48]
899 ; CHECK-NEXT: vstrw.32 q0, [r1]
900 ; CHECK-NEXT: add sp, #32
901 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
904 %l1 = load <64 x float>, <64 x float>* %src, align 4
905 %s1 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
906 %s2 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
907 %s3 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
908 %s4 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
909 %a1 = fadd <16 x float> %s1, %s2
910 %a2 = fadd <16 x float> %s3, %s4
911 %a3 = fadd <16 x float> %a1, %a2
912 store <16 x float> %a3, <16 x float> *%dst
; Same pattern as vld4_v4f32 but with the source load at align 1: the checks
; show vldrb.u8 byte loads plus vmov.f32 lane gathering instead of the
; vld40..vld43 sequence (presumably vld4x needs element alignment — the
; difference from the aligned variant above is the point of the test).
916 define void @vld4_v4f32_align1(<16 x float> *%src, <4 x float> *%dst) {
917 ; CHECK-LABEL: vld4_v4f32_align1:
918 ; CHECK: @ %bb.0: @ %entry
919 ; CHECK-NEXT: .vsave {d8, d9, d10, d11}
920 ; CHECK-NEXT: vpush {d8, d9, d10, d11}
921 ; CHECK-NEXT: vldrb.u8 q0, [r0, #48]
922 ; CHECK-NEXT: vldrb.u8 q1, [r0, #32]
923 ; CHECK-NEXT: vldrb.u8 q3, [r0, #16]
924 ; CHECK-NEXT: vldrb.u8 q2, [r0]
925 ; CHECK-NEXT: vmov.f32 s18, s7
926 ; CHECK-NEXT: vmov.f32 s16, s11
927 ; CHECK-NEXT: vmov.f32 s20, s10
928 ; CHECK-NEXT: vmov.f32 s17, s15
929 ; CHECK-NEXT: vmov.f32 s19, s3
930 ; CHECK-NEXT: vmov.f32 s21, s14
931 ; CHECK-NEXT: vmov.f32 s22, s6
932 ; CHECK-NEXT: vmov.f32 s23, s2
933 ; CHECK-NEXT: vadd.f32 q4, q5, q4
934 ; CHECK-NEXT: vmov.f32 s20, s9
935 ; CHECK-NEXT: vmov.f32 s21, s13
936 ; CHECK-NEXT: vmov.f32 s22, s5
937 ; CHECK-NEXT: vmov.f32 s23, s1
938 ; CHECK-NEXT: vmov.f32 s9, s12
939 ; CHECK-NEXT: vmov.f32 s10, s4
940 ; CHECK-NEXT: vmov.f32 s11, s0
941 ; CHECK-NEXT: vadd.f32 q0, q2, q5
942 ; CHECK-NEXT: vadd.f32 q0, q0, q4
943 ; CHECK-NEXT: vstrw.32 q0, [r1]
944 ; CHECK-NEXT: vpop {d8, d9, d10, d11}
947 %l1 = load <16 x float>, <16 x float>* %src, align 1
948 %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
949 %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
950 %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
951 %s4 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
952 %a1 = fadd <4 x float> %s1, %s2
953 %a2 = fadd <4 x float> %s3, %s4
954 %a3 = fadd <4 x float> %a1, %a2
955 store <4 x float> %a3, <4 x float> *%dst
; Stride-4 deinterleave of <8 x half> to <2 x half>: too narrow for vld4x, so
; lanes are paired up with vmovx.f16/vins.f16 and the 32-bit result is stored
; via a GPR (vmov r0, s0 / str).
961 define void @vld4_v2f16(<8 x half> *%src, <2 x half> *%dst) {
962 ; CHECK-LABEL: vld4_v2f16:
963 ; CHECK: @ %bb.0: @ %entry
964 ; CHECK-NEXT: vldrh.u16 q0, [r0]
965 ; CHECK-NEXT: vmovx.f16 s8, s1
966 ; CHECK-NEXT: vmovx.f16 s4, s3
967 ; CHECK-NEXT: vins.f16 s8, s4
968 ; CHECK-NEXT: vmovx.f16 s12, s0
969 ; CHECK-NEXT: vmovx.f16 s4, s2
970 ; CHECK-NEXT: vins.f16 s1, s3
971 ; CHECK-NEXT: vins.f16 s12, s4
972 ; CHECK-NEXT: vmov.f32 s4, s1
973 ; CHECK-NEXT: vins.f16 s0, s2
974 ; CHECK-NEXT: vadd.f16 q1, q1, q2
975 ; CHECK-NEXT: vadd.f16 q0, q0, q3
976 ; CHECK-NEXT: vadd.f16 q0, q0, q1
977 ; CHECK-NEXT: vmov r0, s0
978 ; CHECK-NEXT: str r0, [r1]
981 %l1 = load <8 x half>, <8 x half>* %src, align 2
982 %s1 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 0, i32 4>
983 %s2 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 1, i32 5>
984 %s3 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 2, i32 6>
985 %s4 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 3, i32 7>
986 %a1 = fadd <2 x half> %s1, %s2
987 %a2 = fadd <2 x half> %s3, %s4
988 %a3 = fadd <2 x half> %a1, %a2
989 store <2 x half> %a3, <2 x half> *%dst
; Stride-4 deinterleave of <16 x half> to <4 x half>: still below vld4x width,
; lowered as two vldrh loads with vmovx.f16/vins.f16 lane shuffles; the 64-bit
; result is moved out through GPRs (vmov r0, r2, d0 / strd).
993 define void @vld4_v4f16(<16 x half> *%src, <4 x half> *%dst) {
994 ; CHECK-LABEL: vld4_v4f16:
995 ; CHECK: @ %bb.0: @ %entry
996 ; CHECK-NEXT: .vsave {d8}
997 ; CHECK-NEXT: vpush {d8}
998 ; CHECK-NEXT: vldrh.u16 q0, [r0]
999 ; CHECK-NEXT: vldrh.u16 q2, [r0, #16]
1000 ; CHECK-NEXT: vmovx.f16 s4, s0
1001 ; CHECK-NEXT: vmovx.f16 s6, s2
1002 ; CHECK-NEXT: vins.f16 s0, s2
1003 ; CHECK-NEXT: vmovx.f16 s12, s1
1004 ; CHECK-NEXT: vmovx.f16 s2, s3
1005 ; CHECK-NEXT: vins.f16 s4, s6
1006 ; CHECK-NEXT: vmovx.f16 s5, s8
1007 ; CHECK-NEXT: vmovx.f16 s6, s10
1008 ; CHECK-NEXT: vins.f16 s12, s2
1009 ; CHECK-NEXT: vmovx.f16 s13, s9
1010 ; CHECK-NEXT: vmovx.f16 s2, s11
1011 ; CHECK-NEXT: vins.f16 s1, s3
1012 ; CHECK-NEXT: vins.f16 s9, s11
1013 ; CHECK-NEXT: vins.f16 s8, s10
1014 ; CHECK-NEXT: vmov.f32 s16, s1
1015 ; CHECK-NEXT: vins.f16 s5, s6
1016 ; CHECK-NEXT: vins.f16 s13, s2
1017 ; CHECK-NEXT: vmov.f32 s1, s8
1018 ; CHECK-NEXT: vmov.f32 s17, s9
1019 ; CHECK-NEXT: vadd.f16 q0, q0, q1
1020 ; CHECK-NEXT: vadd.f16 q3, q4, q3
1021 ; CHECK-NEXT: vadd.f16 q0, q0, q3
1022 ; CHECK-NEXT: vmov r0, r2, d0
1023 ; CHECK-NEXT: strd r0, r2, [r1]
1024 ; CHECK-NEXT: vpop {d8}
1027 %l1 = load <16 x half>, <16 x half>* %src, align 2
1028 %s1 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
1029 %s2 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
1030 %s3 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
1031 %s4 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
1032 %a1 = fadd <4 x half> %s1, %s2
1033 %a2 = fadd <4 x half> %s3, %s4
1034 %a3 = fadd <4 x half> %a1, %a2
1035 store <4 x half> %a3, <4 x half> *%dst
; Stride-4 deinterleave of <32 x half> with an <8 x half> result: the ideal
; f16 case, matched to the 16-bit structured-load sequence vld40.16..vld43.16.
1039 define void @vld4_v8f16(<32 x half> *%src, <8 x half> *%dst) {
1040 ; CHECK-LABEL: vld4_v8f16:
1041 ; CHECK: @ %bb.0: @ %entry
1042 ; CHECK-NEXT: vld40.16 {q0, q1, q2, q3}, [r0]
1043 ; CHECK-NEXT: vld41.16 {q0, q1, q2, q3}, [r0]
1044 ; CHECK-NEXT: vld42.16 {q0, q1, q2, q3}, [r0]
1045 ; CHECK-NEXT: vld43.16 {q0, q1, q2, q3}, [r0]
1046 ; CHECK-NEXT: vadd.f16 q2, q2, q3
1047 ; CHECK-NEXT: vadd.f16 q0, q0, q1
1048 ; CHECK-NEXT: vadd.f16 q0, q0, q2
1049 ; CHECK-NEXT: vstrw.32 q0, [r1]
1052 %l1 = load <32 x half>, <32 x half>* %src, align 2
1053 %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
1054 %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
1055 %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
1056 %s4 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
1057 %a1 = fadd <8 x half> %s1, %s2
1058 %a2 = fadd <8 x half> %s3, %s4
1059 %a3 = fadd <8 x half> %a1, %a2
1060 store <8 x half> %a3, <8 x half> *%dst
; <16 x half> result split into two vld40.16..vld43.16 groups; the first
; group's final vld43 post-increments r0 to reach the second 64-byte block.
1064 define void @vld4_v16f16(<64 x half> *%src, <16 x half> *%dst) {
1065 ; CHECK-LABEL: vld4_v16f16:
1066 ; CHECK: @ %bb.0: @ %entry
1067 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
1068 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
1069 ; CHECK-NEXT: vld40.16 {q0, q1, q2, q3}, [r0]
1070 ; CHECK-NEXT: vld41.16 {q0, q1, q2, q3}, [r0]
1071 ; CHECK-NEXT: vld42.16 {q0, q1, q2, q3}, [r0]
1072 ; CHECK-NEXT: vld43.16 {q0, q1, q2, q3}, [r0]!
1073 ; CHECK-NEXT: vld40.16 {q4, q5, q6, q7}, [r0]
1074 ; CHECK-NEXT: vadd.f16 q2, q2, q3
1075 ; CHECK-NEXT: vadd.f16 q0, q0, q1
1076 ; CHECK-NEXT: vld41.16 {q4, q5, q6, q7}, [r0]
1077 ; CHECK-NEXT: vadd.f16 q0, q0, q2
1078 ; CHECK-NEXT: vld42.16 {q4, q5, q6, q7}, [r0]
1079 ; CHECK-NEXT: vld43.16 {q4, q5, q6, q7}, [r0]
1080 ; CHECK-NEXT: vstrw.32 q0, [r1]
1081 ; CHECK-NEXT: vadd.f16 q6, q6, q7
1082 ; CHECK-NEXT: vadd.f16 q4, q4, q5
1083 ; CHECK-NEXT: vadd.f16 q4, q4, q6
1084 ; CHECK-NEXT: vstrw.32 q4, [r1, #16]
1085 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
1088 %l1 = load <64 x half>, <64 x half>* %src, align 2
1089 %s1 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
1090 %s2 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
1091 %s3 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
1092 %s4 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
1093 %a1 = fadd <16 x half> %s1, %s2
1094 %a2 = fadd <16 x half> %s3, %s4
1095 %a3 = fadd <16 x half> %a1, %a2
1096 store <16 x half> %a3, <16 x half> *%dst
; Same pattern as vld4_v8f16 but with the source load at align 1: the checks
; show vldrb.u8 byte loads plus vmovx.f16/vins.f16 lane shuffles instead of
; the vld40.16..vld43.16 sequence (presumably vld4x needs element alignment —
; the contrast with the aligned variant above is the point of the test).
1100 define void @vld4_v8f16_align1(<32 x half> *%src, <8 x half> *%dst) {
1101 ; CHECK-LABEL: vld4_v8f16_align1:
1102 ; CHECK: @ %bb.0: @ %entry
1103 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
1104 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
1105 ; CHECK-NEXT: vldrb.u8 q0, [r0, #32]
1106 ; CHECK-NEXT: vldrb.u8 q2, [r0, #48]
1107 ; CHECK-NEXT: vmovx.f16 s18, s1
1108 ; CHECK-NEXT: vmovx.f16 s4, s3
1109 ; CHECK-NEXT: vins.f16 s18, s4
1110 ; CHECK-NEXT: vmovx.f16 s19, s9
1111 ; CHECK-NEXT: vmovx.f16 s4, s11
1112 ; CHECK-NEXT: vins.f16 s1, s3
1113 ; CHECK-NEXT: vins.f16 s19, s4
1114 ; CHECK-NEXT: vldrb.u8 q1, [r0]
1115 ; CHECK-NEXT: vmovx.f16 s22, s0
1116 ; CHECK-NEXT: vmovx.f16 s3, s2
1117 ; CHECK-NEXT: vmovx.f16 s16, s5
1118 ; CHECK-NEXT: vmovx.f16 s12, s7
1119 ; CHECK-NEXT: vins.f16 s16, s12
1120 ; CHECK-NEXT: vldrb.u8 q3, [r0, #16]
1121 ; CHECK-NEXT: vins.f16 s22, s3
1122 ; CHECK-NEXT: vmovx.f16 s23, s8
1123 ; CHECK-NEXT: vmovx.f16 s17, s13
1124 ; CHECK-NEXT: vmovx.f16 s20, s15
1125 ; CHECK-NEXT: vmovx.f16 s3, s10
1126 ; CHECK-NEXT: vins.f16 s17, s20
1127 ; CHECK-NEXT: vins.f16 s23, s3
1128 ; CHECK-NEXT: vmovx.f16 s20, s4
1129 ; CHECK-NEXT: vmovx.f16 s3, s6
1130 ; CHECK-NEXT: vins.f16 s9, s11
1131 ; CHECK-NEXT: vins.f16 s5, s7
1132 ; CHECK-NEXT: vins.f16 s13, s15
1133 ; CHECK-NEXT: vins.f16 s20, s3
1134 ; CHECK-NEXT: vmovx.f16 s21, s12
1135 ; CHECK-NEXT: vmovx.f16 s3, s14
1136 ; CHECK-NEXT: vins.f16 s8, s10
1137 ; CHECK-NEXT: vins.f16 s0, s2
1138 ; CHECK-NEXT: vins.f16 s12, s14
1139 ; CHECK-NEXT: vins.f16 s4, s6
1140 ; CHECK-NEXT: vmov.f32 s24, s5
1141 ; CHECK-NEXT: vins.f16 s21, s3
1142 ; CHECK-NEXT: vmov.f32 s26, s1
1143 ; CHECK-NEXT: vmov.f32 s27, s9
1144 ; CHECK-NEXT: vmov.f32 s25, s13
1145 ; CHECK-NEXT: vmov.f32 s6, s0
1146 ; CHECK-NEXT: vadd.f16 q4, q6, q4
1147 ; CHECK-NEXT: vmov.f32 s7, s8
1148 ; CHECK-NEXT: vmov.f32 s5, s12
1149 ; CHECK-NEXT: vadd.f16 q0, q1, q5
1150 ; CHECK-NEXT: vadd.f16 q0, q0, q4
1151 ; CHECK-NEXT: vstrw.32 q0, [r1]
1152 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
1155 %l1 = load <32 x half>, <32 x half>* %src, align 1
1156 %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
1157 %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
1158 %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
1159 %s4 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
1160 %a1 = fadd <8 x half> %s1, %s2
1161 %a2 = fadd <8 x half> %s3, %s4
1162 %a3 = fadd <8 x half> %a1, %a2
1163 store <8 x half> %a3, <8 x half> *%dst
; Stride-4 deinterleave of <8 x double>: MVE has no 64-bit vld4x, so the
; checks show plain vldrw loads with the four-way sum done as pairwise
; scalar vadd.f64 on d-register halves.
1169 define void @vld4_v2f64(<8 x double> *%src, <2 x double> *%dst) {
1170 ; CHECK-LABEL: vld4_v2f64:
1171 ; CHECK: @ %bb.0: @ %entry
1172 ; CHECK-NEXT: vldrw.u32 q0, [r0, #48]
1173 ; CHECK-NEXT: vldrw.u32 q1, [r0, #32]
1174 ; CHECK-NEXT: vldrw.u32 q2, [r0]
1175 ; CHECK-NEXT: vadd.f64 d0, d0, d1
1176 ; CHECK-NEXT: vadd.f64 d1, d2, d3
1177 ; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
1178 ; CHECK-NEXT: vadd.f64 d2, d2, d3
1179 ; CHECK-NEXT: vadd.f64 d3, d4, d5
1180 ; CHECK-NEXT: vadd.f64 d1, d1, d0
1181 ; CHECK-NEXT: vadd.f64 d0, d3, d2
1182 ; CHECK-NEXT: vstrw.32 q0, [r1]
1185 %l1 = load <8 x double>, <8 x double>* %src, align 8
1186 %s1 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 0, i32 4>
1187 %s2 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 1, i32 5>
1188 %s3 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 2, i32 6>
1189 %s4 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 3, i32 7>
1190 %a1 = fadd <2 x double> %s1, %s2
1191 %a2 = fadd <2 x double> %s3, %s4
1192 %a3 = fadd <2 x double> %a1, %a2
1193 store <2 x double> %a3, <2 x double> *%dst
1197 define void @vld4_v4f64(<16 x double> *%src, <4 x double> *%dst) {
1198 ; CHECK-LABEL: vld4_v4f64:
1199 ; CHECK: @ %bb.0: @ %entry
1200 ; CHECK-NEXT: .vsave {d8, d9}
1201 ; CHECK-NEXT: vpush {d8, d9}
1202 ; CHECK-NEXT: vldrw.u32 q0, [r0, #112]
1203 ; CHECK-NEXT: vldrw.u32 q1, [r0, #96]
1204 ; CHECK-NEXT: vldrw.u32 q2, [r0, #64]
1205 ; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
1206 ; CHECK-NEXT: vadd.f64 d0, d0, d1
1207 ; CHECK-NEXT: vldrw.u32 q4, [r0]
1208 ; CHECK-NEXT: vadd.f64 d1, d2, d3
1209 ; CHECK-NEXT: vldrw.u32 q1, [r0, #80]
1210 ; CHECK-NEXT: vadd.f64 d2, d2, d3
1211 ; CHECK-NEXT: vadd.f64 d3, d4, d5
1212 ; CHECK-NEXT: vldrw.u32 q2, [r0, #48]
1213 ; CHECK-NEXT: vadd.f64 d4, d4, d5
1214 ; CHECK-NEXT: vadd.f64 d5, d6, d7
1215 ; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
1216 ; CHECK-NEXT: vadd.f64 d6, d6, d7
1217 ; CHECK-NEXT: vadd.f64 d7, d8, d9
1218 ; CHECK-NEXT: vadd.f64 d1, d1, d0
1219 ; CHECK-NEXT: vadd.f64 d0, d3, d2
1220 ; CHECK-NEXT: vadd.f64 d3, d5, d4
1221 ; CHECK-NEXT: vstrw.32 q0, [r1, #16]
1222 ; CHECK-NEXT: vadd.f64 d2, d7, d6
1223 ; CHECK-NEXT: vstrw.32 q1, [r1]
1224 ; CHECK-NEXT: vpop {d8, d9}
1227 %l1 = load <16 x double>, <16 x double>* %src, align 8
1228 %s1 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
1229 %s2 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
1230 %s3 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
1231 %s4 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
1232 %a1 = fadd <4 x double> %s1, %s2
1233 %a2 = fadd <4 x double> %s3, %s4
1234 %a3 = fadd <4 x double> %a1, %a2
1235 store <4 x double> %a3, <4 x double> *%dst