1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
3 ; RUN: llc -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_512
5 target triple = "aarch64-unknown-linux-gnu"
; Low-half interleave (zip1 pattern) of two <32 x i8> values. Both the 256-bit
; and 512-bit (ptrue vl32) configurations are expected to lower this to a
; single SVE zip1.b.
7 define void @zip1_v32i8(ptr %a, ptr %b) #0 {
8 ; VBITS_EQ_256-LABEL: zip1_v32i8:
9 ; VBITS_EQ_256: // %bb.0:
10 ; VBITS_EQ_256-NEXT: ptrue p0.b
11 ; VBITS_EQ_256-NEXT: ld1b { z0.b }, p0/z, [x0]
12 ; VBITS_EQ_256-NEXT: ld1b { z1.b }, p0/z, [x1]
13 ; VBITS_EQ_256-NEXT: zip1 z0.b, z0.b, z1.b
14 ; VBITS_EQ_256-NEXT: st1b { z0.b }, p0, [x0]
15 ; VBITS_EQ_256-NEXT: ret
17 ; VBITS_EQ_512-LABEL: zip1_v32i8:
18 ; VBITS_EQ_512: // %bb.0:
19 ; VBITS_EQ_512-NEXT: ptrue p0.b, vl32
20 ; VBITS_EQ_512-NEXT: ld1b { z0.b }, p0/z, [x0]
21 ; VBITS_EQ_512-NEXT: ld1b { z1.b }, p0/z, [x1]
22 ; VBITS_EQ_512-NEXT: zip1 z0.b, z0.b, z1.b
23 ; VBITS_EQ_512-NEXT: st1b { z0.b }, p0, [x0]
24 ; VBITS_EQ_512-NEXT: ret
25 %tmp1 = load volatile <32 x i8>, ptr %a
26 %tmp2 = load volatile <32 x i8>, ptr %b
27 %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47>
28 store volatile <32 x i8> %tmp3, ptr %a
; Full interleave (zip1 + zip2 masks) of <32 x i16> feeding an add. At 256-bit
; VL the v32i16 splits across two registers (offset loads via x8); at 512-bit
; it fits one register and lowers to a single zip1/zip2 pair.
32 define void @zip_v32i16(ptr %a, ptr %b) #0 {
33 ; VBITS_EQ_256-LABEL: zip_v32i16:
34 ; VBITS_EQ_256: // %bb.0:
35 ; VBITS_EQ_256-NEXT: ptrue p0.h
36 ; VBITS_EQ_256-NEXT: mov x8, #16 // =0x10
37 ; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
38 ; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x0]
39 ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
40 ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
41 ; VBITS_EQ_256-NEXT: zip2 z5.h, z0.h, z2.h
42 ; VBITS_EQ_256-NEXT: zip1 z0.h, z0.h, z2.h
43 ; VBITS_EQ_256-NEXT: zip2 z4.h, z1.h, z3.h
44 ; VBITS_EQ_256-NEXT: zip1 z1.h, z1.h, z3.h
45 ; VBITS_EQ_256-NEXT: add z2.h, z4.h, z5.h
46 ; VBITS_EQ_256-NEXT: add z0.h, z1.h, z0.h
47 ; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0, x8, lsl #1]
48 ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
49 ; VBITS_EQ_256-NEXT: ret
51 ; VBITS_EQ_512-LABEL: zip_v32i16:
52 ; VBITS_EQ_512: // %bb.0:
53 ; VBITS_EQ_512-NEXT: ptrue p0.h
54 ; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
55 ; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
56 ; VBITS_EQ_512-NEXT: zip1 z2.h, z0.h, z1.h
57 ; VBITS_EQ_512-NEXT: zip2 z0.h, z0.h, z1.h
58 ; VBITS_EQ_512-NEXT: add z0.h, z2.h, z0.h
59 ; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
60 ; VBITS_EQ_512-NEXT: ret
61 %tmp1 = load <32 x i16>, ptr %a
62 %tmp2 = load <32 x i16>, ptr %b
63 %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47>
64 %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
65 %tmp5 = add <32 x i16> %tmp3, %tmp4
66 store <32 x i16> %tmp5, ptr %a
; Low-half interleave of two <16 x i16> values; a single zip1.h is expected in
; both configurations (the 512-bit run uses a vl16 predicate).
70 define void @zip1_v16i16(ptr %a, ptr %b) #0 {
71 ; VBITS_EQ_256-LABEL: zip1_v16i16:
72 ; VBITS_EQ_256: // %bb.0:
73 ; VBITS_EQ_256-NEXT: ptrue p0.h
74 ; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
75 ; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
76 ; VBITS_EQ_256-NEXT: zip1 z0.h, z0.h, z1.h
77 ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
78 ; VBITS_EQ_256-NEXT: ret
80 ; VBITS_EQ_512-LABEL: zip1_v16i16:
81 ; VBITS_EQ_512: // %bb.0:
82 ; VBITS_EQ_512-NEXT: ptrue p0.h, vl16
83 ; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
84 ; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
85 ; VBITS_EQ_512-NEXT: zip1 z0.h, z0.h, z1.h
86 ; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
87 ; VBITS_EQ_512-NEXT: ret
88 %tmp1 = load volatile <16 x i16>, ptr %a
89 %tmp2 = load volatile <16 x i16>, ptr %b
90 %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
91 store volatile <16 x i16> %tmp3, ptr %a
; Low-half interleave of two <8 x i32> values; a single zip1.s is expected in
; both configurations (vl8 predicate at 512-bit).
95 define void @zip1_v8i32(ptr %a, ptr %b) #0 {
96 ; VBITS_EQ_256-LABEL: zip1_v8i32:
97 ; VBITS_EQ_256: // %bb.0:
98 ; VBITS_EQ_256-NEXT: ptrue p0.s
99 ; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
100 ; VBITS_EQ_256-NEXT: ld1w { z1.s }, p0/z, [x1]
101 ; VBITS_EQ_256-NEXT: zip1 z0.s, z0.s, z1.s
102 ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
103 ; VBITS_EQ_256-NEXT: ret
105 ; VBITS_EQ_512-LABEL: zip1_v8i32:
106 ; VBITS_EQ_512: // %bb.0:
107 ; VBITS_EQ_512-NEXT: ptrue p0.s, vl8
108 ; VBITS_EQ_512-NEXT: ld1w { z0.s }, p0/z, [x0]
109 ; VBITS_EQ_512-NEXT: ld1w { z1.s }, p0/z, [x1]
110 ; VBITS_EQ_512-NEXT: zip1 z0.s, z0.s, z1.s
111 ; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
112 ; VBITS_EQ_512-NEXT: ret
113 %tmp1 = load volatile <8 x i32>, ptr %a
114 %tmp2 = load volatile <8 x i32>, ptr %b
115 %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
116 store volatile <8 x i32> %tmp3, ptr %a
; zip1/zip2 masks on <4 x double> feeding an fadd. At 256-bit VL the vector
; fills a whole register, so zip1+zip2 are emitted directly. At 512-bit VL the
; v4f64 only fills half a register, so the codegen shown builds the high-half
; interleave through a stack temporary (element extracts + stp) instead of a
; zip2, and uses a predicated fadd.
120 define void @zip_v4f64(ptr %a, ptr %b) #0 {
121 ; VBITS_EQ_256-LABEL: zip_v4f64:
122 ; VBITS_EQ_256: // %bb.0:
123 ; VBITS_EQ_256-NEXT: ptrue p0.d
124 ; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x0]
125 ; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
126 ; VBITS_EQ_256-NEXT: zip1 z2.d, z0.d, z1.d
127 ; VBITS_EQ_256-NEXT: zip2 z0.d, z0.d, z1.d
128 ; VBITS_EQ_256-NEXT: fadd z0.d, z2.d, z0.d
129 ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0]
130 ; VBITS_EQ_256-NEXT: ret
132 ; VBITS_EQ_512-LABEL: zip_v4f64:
133 ; VBITS_EQ_512: // %bb.0:
134 ; VBITS_EQ_512-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
135 ; VBITS_EQ_512-NEXT: sub x9, sp, #48
136 ; VBITS_EQ_512-NEXT: mov x29, sp
137 ; VBITS_EQ_512-NEXT: and sp, x9, #0xffffffffffffffe0
138 ; VBITS_EQ_512-NEXT: .cfi_def_cfa w29, 16
139 ; VBITS_EQ_512-NEXT: .cfi_offset w30, -8
140 ; VBITS_EQ_512-NEXT: .cfi_offset w29, -16
141 ; VBITS_EQ_512-NEXT: ptrue p0.d, vl4
142 ; VBITS_EQ_512-NEXT: mov x8, sp
143 ; VBITS_EQ_512-NEXT: ld1d { z0.d }, p0/z, [x1]
144 ; VBITS_EQ_512-NEXT: ld1d { z1.d }, p0/z, [x0]
145 ; VBITS_EQ_512-NEXT: mov z2.d, z0.d[3]
146 ; VBITS_EQ_512-NEXT: mov z3.d, z1.d[3]
147 ; VBITS_EQ_512-NEXT: mov z4.d, z0.d[2]
148 ; VBITS_EQ_512-NEXT: mov z5.d, z1.d[2]
149 ; VBITS_EQ_512-NEXT: zip1 z0.d, z1.d, z0.d
150 ; VBITS_EQ_512-NEXT: stp d3, d2, [sp, #16]
151 ; VBITS_EQ_512-NEXT: stp d5, d4, [sp]
152 ; VBITS_EQ_512-NEXT: ld1d { z1.d }, p0/z, [x8]
153 ; VBITS_EQ_512-NEXT: fadd z0.d, p0/m, z0.d, z1.d
154 ; VBITS_EQ_512-NEXT: st1d { z0.d }, p0, [x0]
155 ; VBITS_EQ_512-NEXT: mov sp, x29
156 ; VBITS_EQ_512-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
157 ; VBITS_EQ_512-NEXT: ret
158 %tmp1 = load <4 x double>, ptr %a
159 %tmp2 = load <4 x double>, ptr %b
160 %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
161 %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
162 %tmp5 = fadd <4 x double> %tmp3, %tmp4
163 store <4 x double> %tmp5, ptr %a
167 ; Don't use SVE for 128-bit vectors
; 128-bit vectors are expected to stay on NEON: q-register loads and
; v-register zip1/zip2/add, identical under both RUN configurations (CHECK).
168 define void @zip_v4i32(ptr %a, ptr %b) #0 {
169 ; CHECK-LABEL: zip_v4i32:
171 ; CHECK-NEXT: ldr q0, [x0]
172 ; CHECK-NEXT: ldr q1, [x1]
173 ; CHECK-NEXT: zip1 v2.4s, v0.4s, v1.4s
174 ; CHECK-NEXT: zip2 v0.4s, v0.4s, v1.4s
175 ; CHECK-NEXT: add v0.4s, v2.4s, v0.4s
176 ; CHECK-NEXT: str q0, [x0]
178 %tmp1 = load <4 x i32>, ptr %a
179 %tmp2 = load <4 x i32>, ptr %b
180 %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
181 %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
182 %tmp5 = add <4 x i32> %tmp3, %tmp4
183 store <4 x i32> %tmp5, ptr %a
; zip1 pattern where the second shuffle operand is undef (elements duplicated
; from the first operand), so zip1 is emitted with the same register twice.
187 define void @zip1_v8i32_undef(ptr %a) #0 {
188 ; VBITS_EQ_256-LABEL: zip1_v8i32_undef:
189 ; VBITS_EQ_256: // %bb.0:
190 ; VBITS_EQ_256-NEXT: ptrue p0.s
191 ; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
192 ; VBITS_EQ_256-NEXT: zip1 z0.s, z0.s, z0.s
193 ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
194 ; VBITS_EQ_256-NEXT: ret
196 ; VBITS_EQ_512-LABEL: zip1_v8i32_undef:
197 ; VBITS_EQ_512: // %bb.0:
198 ; VBITS_EQ_512-NEXT: ptrue p0.s, vl8
199 ; VBITS_EQ_512-NEXT: ld1w { z0.s }, p0/z, [x0]
200 ; VBITS_EQ_512-NEXT: zip1 z0.s, z0.s, z0.s
201 ; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
202 ; VBITS_EQ_512-NEXT: ret
203 %tmp1 = load volatile <8 x i32>, ptr %a
204 %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
205 store volatile <8 x i32> %tmp2, ptr %a
; Even/odd transpose masks on <32 x i8> feeding an add; both configurations
; lower to a trn1/trn2 pair. The trn2 mask contains undef elements, which must
; not defeat the pattern match.
209 define void @trn_v32i8(ptr %a, ptr %b) #0 {
210 ; VBITS_EQ_256-LABEL: trn_v32i8:
211 ; VBITS_EQ_256: // %bb.0:
212 ; VBITS_EQ_256-NEXT: ptrue p0.b
213 ; VBITS_EQ_256-NEXT: ld1b { z0.b }, p0/z, [x0]
214 ; VBITS_EQ_256-NEXT: ld1b { z1.b }, p0/z, [x1]
215 ; VBITS_EQ_256-NEXT: trn1 z2.b, z0.b, z1.b
216 ; VBITS_EQ_256-NEXT: trn2 z0.b, z0.b, z1.b
217 ; VBITS_EQ_256-NEXT: add z0.b, z2.b, z0.b
218 ; VBITS_EQ_256-NEXT: st1b { z0.b }, p0, [x0]
219 ; VBITS_EQ_256-NEXT: ret
221 ; VBITS_EQ_512-LABEL: trn_v32i8:
222 ; VBITS_EQ_512: // %bb.0:
223 ; VBITS_EQ_512-NEXT: ptrue p0.b, vl32
224 ; VBITS_EQ_512-NEXT: ld1b { z0.b }, p0/z, [x0]
225 ; VBITS_EQ_512-NEXT: ld1b { z1.b }, p0/z, [x1]
226 ; VBITS_EQ_512-NEXT: trn1 z2.b, z0.b, z1.b
227 ; VBITS_EQ_512-NEXT: trn2 z0.b, z0.b, z1.b
228 ; VBITS_EQ_512-NEXT: add z0.b, z2.b, z0.b
229 ; VBITS_EQ_512-NEXT: st1b { z0.b }, p0, [x0]
230 ; VBITS_EQ_512-NEXT: ret
231 %tmp1 = load <32 x i8>, ptr %a
232 %tmp2 = load <32 x i8>, ptr %b
233 %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62>
234 %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 undef, i32 37, i32 7, i32 undef, i32 undef, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
235 %tmp5 = add <32 x i8> %tmp3, %tmp4
236 store <32 x i8> %tmp5, ptr %a
; Transpose masks on <32 x i16> feeding an add. At 256-bit VL the vector
; splits into two registers (two trn1/trn2 pairs); at 512-bit it fits one
; register and a single trn1/trn2 pair suffices.
240 define void @trn_v32i16(ptr %a, ptr %b) #0 {
241 ; VBITS_EQ_256-LABEL: trn_v32i16:
242 ; VBITS_EQ_256: // %bb.0:
243 ; VBITS_EQ_256-NEXT: ptrue p0.h
244 ; VBITS_EQ_256-NEXT: mov x8, #16 // =0x10
245 ; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
246 ; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
247 ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
248 ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
249 ; VBITS_EQ_256-NEXT: trn1 z4.h, z0.h, z1.h
250 ; VBITS_EQ_256-NEXT: trn2 z0.h, z0.h, z1.h
251 ; VBITS_EQ_256-NEXT: trn1 z1.h, z2.h, z3.h
252 ; VBITS_EQ_256-NEXT: trn2 z2.h, z2.h, z3.h
253 ; VBITS_EQ_256-NEXT: add z0.h, z4.h, z0.h
254 ; VBITS_EQ_256-NEXT: add z1.h, z1.h, z2.h
255 ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
256 ; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
257 ; VBITS_EQ_256-NEXT: ret
259 ; VBITS_EQ_512-LABEL: trn_v32i16:
260 ; VBITS_EQ_512: // %bb.0:
261 ; VBITS_EQ_512-NEXT: ptrue p0.h
262 ; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
263 ; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
264 ; VBITS_EQ_512-NEXT: trn1 z2.h, z0.h, z1.h
265 ; VBITS_EQ_512-NEXT: trn2 z0.h, z0.h, z1.h
266 ; VBITS_EQ_512-NEXT: add z0.h, z2.h, z0.h
267 ; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
268 ; VBITS_EQ_512-NEXT: ret
269 %tmp1 = load <32 x i16>, ptr %a
270 %tmp2 = load <32 x i16>, ptr %b
271 %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62>
272 %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 undef, i32 37, i32 7, i32 undef, i32 undef, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
273 %tmp5 = add <32 x i16> %tmp3, %tmp4
274 store <32 x i16> %tmp5, ptr %a
; Transpose masks on <16 x i16> (no undef elements) feeding an add; a single
; trn1/trn2 pair is expected in both configurations.
278 define void @trn_v16i16(ptr %a, ptr %b) #0 {
279 ; VBITS_EQ_256-LABEL: trn_v16i16:
280 ; VBITS_EQ_256: // %bb.0:
281 ; VBITS_EQ_256-NEXT: ptrue p0.h
282 ; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
283 ; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
284 ; VBITS_EQ_256-NEXT: trn1 z2.h, z0.h, z1.h
285 ; VBITS_EQ_256-NEXT: trn2 z0.h, z0.h, z1.h
286 ; VBITS_EQ_256-NEXT: add z0.h, z2.h, z0.h
287 ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
288 ; VBITS_EQ_256-NEXT: ret
290 ; VBITS_EQ_512-LABEL: trn_v16i16:
291 ; VBITS_EQ_512: // %bb.0:
292 ; VBITS_EQ_512-NEXT: ptrue p0.h, vl16
293 ; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
294 ; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
295 ; VBITS_EQ_512-NEXT: trn1 z2.h, z0.h, z1.h
296 ; VBITS_EQ_512-NEXT: trn2 z0.h, z0.h, z1.h
297 ; VBITS_EQ_512-NEXT: add z0.h, z2.h, z0.h
298 ; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
299 ; VBITS_EQ_512-NEXT: ret
300 %tmp1 = load <16 x i16>, ptr %a
301 %tmp2 = load <16 x i16>, ptr %b
302 %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
303 %tmp4 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
304 %tmp5 = add <16 x i16> %tmp3, %tmp4
305 store <16 x i16> %tmp5, ptr %a
; Transpose masks on <8 x i32> with undef elements scattered through both
; masks; still expected to match trn1/trn2 in both configurations.
309 define void @trn_v8i32(ptr %a, ptr %b) #0 {
310 ; VBITS_EQ_256-LABEL: trn_v8i32:
311 ; VBITS_EQ_256: // %bb.0:
312 ; VBITS_EQ_256-NEXT: ptrue p0.s
313 ; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
314 ; VBITS_EQ_256-NEXT: ld1w { z1.s }, p0/z, [x1]
315 ; VBITS_EQ_256-NEXT: trn1 z2.s, z0.s, z1.s
316 ; VBITS_EQ_256-NEXT: trn2 z0.s, z0.s, z1.s
317 ; VBITS_EQ_256-NEXT: add z0.s, z2.s, z0.s
318 ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
319 ; VBITS_EQ_256-NEXT: ret
321 ; VBITS_EQ_512-LABEL: trn_v8i32:
322 ; VBITS_EQ_512: // %bb.0:
323 ; VBITS_EQ_512-NEXT: ptrue p0.s, vl8
324 ; VBITS_EQ_512-NEXT: ld1w { z0.s }, p0/z, [x0]
325 ; VBITS_EQ_512-NEXT: ld1w { z1.s }, p0/z, [x1]
326 ; VBITS_EQ_512-NEXT: trn1 z2.s, z0.s, z1.s
327 ; VBITS_EQ_512-NEXT: trn2 z0.s, z0.s, z1.s
328 ; VBITS_EQ_512-NEXT: add z0.s, z2.s, z0.s
329 ; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
330 ; VBITS_EQ_512-NEXT: ret
331 %tmp1 = load <8 x i32>, ptr %a
332 %tmp2 = load <8 x i32>, ptr %b
333 %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
334 %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
335 %tmp5 = add <8 x i32> %tmp3, %tmp4
336 store <8 x i32> %tmp5, ptr %a
; Transpose masks on <4 x double> feeding an fadd. Unlike zip2, trn1/trn2 only
; pair adjacent elements, so both VL configurations can use them directly; the
; 512-bit run emits a predicated (p0/m) fadd.
340 define void @trn_v4f64(ptr %a, ptr %b) #0 {
341 ; VBITS_EQ_256-LABEL: trn_v4f64:
342 ; VBITS_EQ_256: // %bb.0:
343 ; VBITS_EQ_256-NEXT: ptrue p0.d
344 ; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x0]
345 ; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
346 ; VBITS_EQ_256-NEXT: trn1 z2.d, z0.d, z1.d
347 ; VBITS_EQ_256-NEXT: trn2 z0.d, z0.d, z1.d
348 ; VBITS_EQ_256-NEXT: fadd z0.d, z2.d, z0.d
349 ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0]
350 ; VBITS_EQ_256-NEXT: ret
352 ; VBITS_EQ_512-LABEL: trn_v4f64:
353 ; VBITS_EQ_512: // %bb.0:
354 ; VBITS_EQ_512-NEXT: ptrue p0.d, vl4
355 ; VBITS_EQ_512-NEXT: ld1d { z0.d }, p0/z, [x0]
356 ; VBITS_EQ_512-NEXT: ld1d { z1.d }, p0/z, [x1]
357 ; VBITS_EQ_512-NEXT: trn1 z2.d, z0.d, z1.d
358 ; VBITS_EQ_512-NEXT: trn2 z0.d, z0.d, z1.d
359 ; VBITS_EQ_512-NEXT: fadd z0.d, p0/m, z0.d, z2.d
360 ; VBITS_EQ_512-NEXT: st1d { z0.d }, p0, [x0]
361 ; VBITS_EQ_512-NEXT: ret
362 %tmp1 = load <4 x double>, ptr %a
363 %tmp2 = load <4 x double>, ptr %b
364 %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
365 %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
366 %tmp5 = fadd <4 x double> %tmp3, %tmp4
367 store <4 x double> %tmp5, ptr %a
371 ; Don't use SVE for 128-bit vectors
; 128-bit vectors stay on NEON: q-register loads and v-register
; trn1/trn2/fadd, identical in both RUN configurations (CHECK).
372 define void @trn_v4f32(ptr %a, ptr %b) #0 {
373 ; CHECK-LABEL: trn_v4f32:
375 ; CHECK-NEXT: ldr q0, [x0]
376 ; CHECK-NEXT: ldr q1, [x1]
377 ; CHECK-NEXT: trn1 v2.4s, v0.4s, v1.4s
378 ; CHECK-NEXT: trn2 v0.4s, v0.4s, v1.4s
379 ; CHECK-NEXT: fadd v0.4s, v2.4s, v0.4s
380 ; CHECK-NEXT: str q0, [x0]
382 %tmp1 = load <4 x float>, ptr %a
383 %tmp2 = load <4 x float>, ptr %b
384 %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
385 %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
386 %tmp5 = fadd <4 x float> %tmp3, %tmp4
387 store <4 x float> %tmp5, ptr %a
; Transpose masks where the second shuffle operand is undef: trn1/trn2 are
; emitted with the same source register twice.
391 define void @trn_v8i32_undef(ptr %a) #0 {
392 ; VBITS_EQ_256-LABEL: trn_v8i32_undef:
393 ; VBITS_EQ_256: // %bb.0:
394 ; VBITS_EQ_256-NEXT: ptrue p0.s
395 ; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
396 ; VBITS_EQ_256-NEXT: trn1 z1.s, z0.s, z0.s
397 ; VBITS_EQ_256-NEXT: trn2 z0.s, z0.s, z0.s
398 ; VBITS_EQ_256-NEXT: add z0.s, z1.s, z0.s
399 ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
400 ; VBITS_EQ_256-NEXT: ret
402 ; VBITS_EQ_512-LABEL: trn_v8i32_undef:
403 ; VBITS_EQ_512: // %bb.0:
404 ; VBITS_EQ_512-NEXT: ptrue p0.s, vl8
405 ; VBITS_EQ_512-NEXT: ld1w { z0.s }, p0/z, [x0]
406 ; VBITS_EQ_512-NEXT: trn1 z1.s, z0.s, z0.s
407 ; VBITS_EQ_512-NEXT: trn2 z0.s, z0.s, z0.s
408 ; VBITS_EQ_512-NEXT: add z0.s, z1.s, z0.s
409 ; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
410 ; VBITS_EQ_512-NEXT: ret
411 %tmp1 = load <8 x i32>, ptr %a
412 %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
413 %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
414 %tmp5 = add <8 x i32> %tmp3, %tmp4
415 store <8 x i32> %tmp5, ptr %a
419 ; Emit zip2 instruction for v32i8 shuffle with vscale_range(2,2),
420 ; since the size of v32i8 is the same as the runtime vector length.
; Mask selects elements 16..63 interleaved (the high halves), so with
; vscale_range(2,2) this is exactly a zip2.b.
421 define void @zip2_v32i8(ptr %a, ptr %b) #1 {
422 ; CHECK-LABEL: zip2_v32i8:
424 ; CHECK-NEXT: ptrue p0.b
425 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
426 ; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
427 ; CHECK-NEXT: zip2 z0.b, z0.b, z1.b
428 ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
430 %tmp1 = load volatile <32 x i8>, ptr %a
431 %tmp2 = load volatile <32 x i8>, ptr %b
432 %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
433 store volatile <32 x i8> %tmp3, ptr %a
437 ; Emit zip2 instruction for v16i16 shuffle with vscale_range(2,2),
438 ; since the size of v16i16 is the same as the runtime vector length.
; High-half interleave of <16 x i16>; with vscale_range(2,2) the vector fills
; the register exactly, so a single zip2.h is emitted.
439 define void @zip2_v16i16(ptr %a, ptr %b) #1 {
440 ; CHECK-LABEL: zip2_v16i16:
442 ; CHECK-NEXT: ptrue p0.h
443 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
444 ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
445 ; CHECK-NEXT: zip2 z0.h, z0.h, z1.h
446 ; CHECK-NEXT: st1h { z0.h }, p0, [x0]
448 %tmp1 = load volatile <16 x i16>, ptr %a
449 %tmp2 = load volatile <16 x i16>, ptr %b
450 %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
451 store volatile <16 x i16> %tmp3, ptr %a
455 ; Emit zip2 instruction for v8i32 shuffle with vscale_range(2,2),
456 ; since the size of v8i32 is the same as the runtime vector length.
; High-half interleave of <8 x i32>; with vscale_range(2,2) a single zip2.s is
; emitted.
457 define void @zip2_v8i32(ptr %a, ptr %b) #1 {
458 ; CHECK-LABEL: zip2_v8i32:
460 ; CHECK-NEXT: ptrue p0.s
461 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
462 ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
463 ; CHECK-NEXT: zip2 z0.s, z0.s, z1.s
464 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
466 %tmp1 = load volatile <8 x i32>, ptr %a
467 %tmp2 = load volatile <8 x i32>, ptr %b
468 %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
469 store volatile <8 x i32> %tmp3, ptr %a
473 ; Emit zip2 instruction for v8i32 and undef shuffle with vscale_range(2,2)
; High-half interleave with an undef second operand: zip2 with the same
; register as both sources.
474 define void @zip2_v8i32_undef(ptr %a) #1 {
475 ; CHECK-LABEL: zip2_v8i32_undef:
477 ; CHECK-NEXT: ptrue p0.s
478 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
479 ; CHECK-NEXT: zip2 z0.s, z0.s, z0.s
480 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
482 %tmp1 = load volatile <8 x i32>, ptr %a
483 %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
484 store volatile <8 x i32> %tmp2, ptr %a
488 ; Emit uzp1/2 instruction for v32i8 shuffle with vscale_range(2,2),
489 ; since the size of v32i8 is the same as the runtime vector length.
; Even/odd de-interleave masks (with undef elements in the odd mask) on
; <32 x i8>; with vscale_range(2,2) a uzp1/uzp2 pair is emitted.
490 define void @uzp_v32i8(ptr %a, ptr %b) #1 {
491 ; CHECK-LABEL: uzp_v32i8:
493 ; CHECK-NEXT: ptrue p0.b
494 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
495 ; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
496 ; CHECK-NEXT: uzp1 z2.b, z0.b, z1.b
497 ; CHECK-NEXT: uzp2 z0.b, z0.b, z1.b
498 ; CHECK-NEXT: add z0.b, z2.b, z0.b
499 ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
501 %tmp1 = load <32 x i8>, ptr %a
502 %tmp2 = load <32 x i8>, ptr %b
503 %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
504 %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 undef, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
505 %tmp5 = add <32 x i8> %tmp3, %tmp4
506 store <32 x i8> %tmp5, ptr %a
510 ; Emit uzp1/2 instruction for v32i16 shuffle with vscale_range(2,2),
511 ; v32i16 will be expanded into two v16i16, and the size of v16i16 is
512 ; the same as the runtime vector length.
; De-interleave of <32 x i16>, which is split into two v16i16 halves under
; vscale_range(2,2); each half gets its own uzp1/uzp2 pair (offset loads via
; x8).
513 define void @uzp_v32i16(ptr %a, ptr %b) #1 {
514 ; CHECK-LABEL: uzp_v32i16:
516 ; CHECK-NEXT: ptrue p0.h
517 ; CHECK-NEXT: mov x8, #16 // =0x10
518 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x1, x8, lsl #1]
519 ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
520 ; CHECK-NEXT: ld1h { z2.h }, p0/z, [x0, x8, lsl #1]
521 ; CHECK-NEXT: ld1h { z3.h }, p0/z, [x0]
522 ; CHECK-NEXT: uzp1 z4.h, z1.h, z0.h
523 ; CHECK-NEXT: uzp2 z0.h, z1.h, z0.h
524 ; CHECK-NEXT: uzp1 z1.h, z3.h, z2.h
525 ; CHECK-NEXT: uzp2 z2.h, z3.h, z2.h
526 ; CHECK-NEXT: add z0.h, z4.h, z0.h
527 ; CHECK-NEXT: add z1.h, z1.h, z2.h
528 ; CHECK-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
529 ; CHECK-NEXT: st1h { z1.h }, p0, [x0]
531 %tmp1 = load <32 x i16>, ptr %a
532 %tmp2 = load <32 x i16>, ptr %b
533 %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
534 %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 undef, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
535 %tmp5 = add <32 x i16> %tmp3, %tmp4
536 store <32 x i16> %tmp5, ptr %a
540 ; Emit uzp1/2 instruction for v16i16 shuffle with vscale_range(2,2),
541 ; since the size of v16i16 is the same as the runtime vector length.
; De-interleave of <16 x i16> (no undef elements); a single uzp1/uzp2 pair
; under vscale_range(2,2).
542 define void @uzp_v16i16(ptr %a, ptr %b) #1 {
543 ; CHECK-LABEL: uzp_v16i16:
545 ; CHECK-NEXT: ptrue p0.h
546 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
547 ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
548 ; CHECK-NEXT: uzp1 z2.h, z0.h, z1.h
549 ; CHECK-NEXT: uzp2 z0.h, z0.h, z1.h
550 ; CHECK-NEXT: add z0.h, z2.h, z0.h
551 ; CHECK-NEXT: st1h { z0.h }, p0, [x0]
553 %tmp1 = load <16 x i16>, ptr %a
554 %tmp2 = load <16 x i16>, ptr %b
555 %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
556 %tmp4 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
557 %tmp5 = add <16 x i16> %tmp3, %tmp4
558 store <16 x i16> %tmp5, ptr %a
562 ; Emit uzp1/2 instruction for v8f32 shuffle with vscale_range(2,2),
563 ; since the size of v8f32 is the same as the runtime vector length.
; Floating-point de-interleave of <8 x float> with undef mask elements in both
; halves; uzp1/uzp2 feeding an unpredicated fadd.
564 define void @uzp_v8f32(ptr %a, ptr %b) #1 {
565 ; CHECK-LABEL: uzp_v8f32:
567 ; CHECK-NEXT: ptrue p0.s
568 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
569 ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
570 ; CHECK-NEXT: uzp1 z2.s, z0.s, z1.s
571 ; CHECK-NEXT: uzp2 z0.s, z0.s, z1.s
572 ; CHECK-NEXT: fadd z0.s, z2.s, z0.s
573 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
575 %tmp1 = load <8 x float>, ptr %a
576 %tmp2 = load <8 x float>, ptr %b
577 %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 6, i32 undef, i32 10, i32 12, i32 14>
578 %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 1, i32 undef, i32 5, i32 7, i32 9, i32 11, i32 undef, i32 undef>
579 %tmp5 = fadd <8 x float> %tmp3, %tmp4
580 store <8 x float> %tmp5, ptr %a
584 ; Emit uzp1/2 instruction for v4i64 shuffle with vscale_range(2,2),
585 ; since the size of v4i64 is the same as the runtime vector length.
; De-interleave of <4 x i64>; a single uzp1/uzp2 pair under
; vscale_range(2,2).
586 define void @uzp_v4i64(ptr %a, ptr %b) #1 {
587 ; CHECK-LABEL: uzp_v4i64:
589 ; CHECK-NEXT: ptrue p0.d
590 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
591 ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
592 ; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
593 ; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d
594 ; CHECK-NEXT: add z0.d, z2.d, z0.d
595 ; CHECK-NEXT: st1d { z0.d }, p0, [x0]
597 %tmp1 = load <4 x i64>, ptr %a
598 %tmp2 = load <4 x i64>, ptr %b
599 %tmp3 = shufflevector <4 x i64> %tmp1, <4 x i64> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
600 %tmp4 = shufflevector <4 x i64> %tmp1, <4 x i64> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
601 %tmp5 = add <4 x i64> %tmp3, %tmp4
602 store <4 x i64> %tmp5, ptr %a
606 ; Don't use SVE for 128-bit vectors
; 128-bit vectors stay on NEON even under vscale_range(2,2): q-register
; uzp1/uzp2 with an eor combining the halves (IR uses xor).
607 define void @uzp_v8i16(ptr %a, ptr %b) #1 {
608 ; CHECK-LABEL: uzp_v8i16:
610 ; CHECK-NEXT: ldr q0, [x0]
611 ; CHECK-NEXT: ldr q1, [x1]
612 ; CHECK-NEXT: uzp1 v2.8h, v0.8h, v1.8h
613 ; CHECK-NEXT: uzp2 v0.8h, v0.8h, v1.8h
614 ; CHECK-NEXT: eor v0.16b, v2.16b, v0.16b
615 ; CHECK-NEXT: str q0, [x0]
617 %tmp1 = load <8 x i16>, ptr %a
618 %tmp2 = load <8 x i16>, ptr %b
619 %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
620 %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
621 %tmp5 = xor <8 x i16> %tmp3, %tmp4
622 store <8 x i16> %tmp5, ptr %a
626 ; Emit uzp1/2 instruction for v8i32 and undef shuffle with vscale_range(2,2)
; De-interleave with an undef second operand (masks repeat the first
; operand's even/odd lanes): uzp1/uzp2 with the same register twice.
627 define void @uzp_v8i32_undef(ptr %a) #1 {
628 ; CHECK-LABEL: uzp_v8i32_undef:
630 ; CHECK-NEXT: ptrue p0.s
631 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
632 ; CHECK-NEXT: uzp1 z1.s, z0.s, z0.s
633 ; CHECK-NEXT: uzp2 z0.s, z0.s, z0.s
634 ; CHECK-NEXT: add z0.s, z1.s, z0.s
635 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
637 %tmp1 = load <8 x i32>, ptr %a
638 %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
639 %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
640 %tmp5 = add <8 x i32> %tmp3, %tmp4
641 store <8 x i32> %tmp5, ptr %a
645 ; Only zip1 can be emitted safely with vscale_range(2,4).
646 ; vscale_range(2,4) means different min/max vector sizes, zip2 relies on
647 ; knowing which indices represent the high half of sve vector register.
; Same IR as zip_v4f64 but with vscale_range(2,4): zip1 is still emitted for
; the low half, while the high half goes through element extracts and a stack
; temporary (no zip2), and the fadd is predicated.
648 define void @zip_vscale2_4(ptr %a, ptr %b) #2 {
649 ; CHECK-LABEL: zip_vscale2_4:
651 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
652 ; CHECK-NEXT: sub x9, sp, #48
653 ; CHECK-NEXT: mov x29, sp
654 ; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0
655 ; CHECK-NEXT: .cfi_def_cfa w29, 16
656 ; CHECK-NEXT: .cfi_offset w30, -8
657 ; CHECK-NEXT: .cfi_offset w29, -16
658 ; CHECK-NEXT: ptrue p0.d, vl4
659 ; CHECK-NEXT: mov x8, sp
660 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1]
661 ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
662 ; CHECK-NEXT: mov z2.d, z0.d[3]
663 ; CHECK-NEXT: mov z3.d, z1.d[3]
664 ; CHECK-NEXT: mov z4.d, z0.d[2]
665 ; CHECK-NEXT: mov z5.d, z1.d[2]
666 ; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
667 ; CHECK-NEXT: stp d3, d2, [sp, #16]
668 ; CHECK-NEXT: stp d5, d4, [sp]
669 ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x8]
670 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
671 ; CHECK-NEXT: st1d { z0.d }, p0, [x0]
672 ; CHECK-NEXT: mov sp, x29
673 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
675 %tmp1 = load <4 x double>, ptr %a
676 %tmp2 = load <4 x double>, ptr %b
677 %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
678 %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
679 %tmp5 = fadd <4 x double> %tmp3, %tmp4
680 store <4 x double> %tmp5, ptr %a
684 attributes #0 = { "target-features"="+sve" }
685 attributes #1 = { "target-features"="+sve" vscale_range(2,2) }
686 attributes #2 = { "target-features"="+sve" vscale_range(2,4) }