; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc %s -mtriple=aarch64 -o - | FileCheck %s
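
; Check that a rounding shift right (srshl/urshl by a negative splat amount)
; followed by a saturating narrow (sqxtn/uqxtn/sqxtun) is combined into a
; single sqrshrn/uqrshrn/sqrshrun where the signedness of the shift and the
; saturation agree, and stays as two instructions where they do not.
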
define <4 x i16> @NarrowAShrI32By5(<4 x i32> %x) {
; CHECK-LABEL: NarrowAShrI32By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrn v0.4h, v0.4s, #5
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowAShrU32By5(<4 x i32> %x) {
; CHECK-LABEL: NarrowAShrU32By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srshr v0.4s, v0.4s, #5
; CHECK-NEXT:    uqxtn v0.4h, v0.4s
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowAShrI32By5ToU16(<4 x i32> %x) {
; CHECK-LABEL: NarrowAShrI32By5ToU16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrun v0.4h, v0.4s, #5
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowLShrI32By5(<4 x i32> %x) {
; CHECK-LABEL: NarrowLShrI32By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.4s, v0.4s, #5
; CHECK-NEXT:    sqxtn v0.4h, v0.4s
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowLShrU32By5(<4 x i32> %x) {
; CHECK-LABEL: NarrowLShrU32By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqrshrn v0.4h, v0.4s, #5
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowLShrI32By5ToU16(<4 x i32> %x) {
; CHECK-LABEL: NarrowLShrI32By5ToU16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.4s, v0.4s, #5
; CHECK-NEXT:    sqxtun v0.4h, v0.4s
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

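; The same combine applies when narrowing from 64-bit to 32-bit lanes.
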
define <2 x i32> @NarrowAShri64By5(<2 x i64> %x) {
; CHECK-LABEL: NarrowAShri64By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrn v0.2s, v0.2d, #5
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

define <2 x i32> @NarrowAShrU64By5(<2 x i64> %x) {
; CHECK-LABEL: NarrowAShrU64By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srshr v0.2d, v0.2d, #5
; CHECK-NEXT:    uqxtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

define <2 x i32> @NarrowAShri64By5ToU32(<2 x i64> %x) {
; CHECK-LABEL: NarrowAShri64By5ToU32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrun v0.2s, v0.2d, #5
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

define <2 x i32> @NarrowLShri64By5(<2 x i64> %x) {
; CHECK-LABEL: NarrowLShri64By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.2d, v0.2d, #5
; CHECK-NEXT:    sqxtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

define <2 x i32> @NarrowLShrU64By5(<2 x i64> %x) {
; CHECK-LABEL: NarrowLShrU64By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqrshrn v0.2s, v0.2d, #5
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

define <2 x i32> @NarrowLShri64By5ToU32(<2 x i64> %x) {
; CHECK-LABEL: NarrowLShri64By5ToU32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.2d, v0.2d, #5
; CHECK-NEXT:    sqxtun v0.2s, v0.2d
; CHECK-NEXT:    ret
  %s = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %s)
  ret <2 x i32> %r
}

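; And likewise when narrowing from 16-bit to 8-bit lanes.
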
define <8 x i8> @NarrowAShri16By5(<8 x i16> %x) {
; CHECK-LABEL: NarrowAShri16By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrn v0.8b, v0.8h, #5
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

define <8 x i8> @NarrowAShrU16By5(<8 x i16> %x) {
; CHECK-LABEL: NarrowAShrU16By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srshr v0.8h, v0.8h, #5
; CHECK-NEXT:    uqxtn v0.8b, v0.8h
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

define <8 x i8> @NarrowAShri16By5ToU8(<8 x i16> %x) {
; CHECK-LABEL: NarrowAShri16By5ToU8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrun v0.8b, v0.8h, #5
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

define <8 x i8> @NarrowLShri16By5(<8 x i16> %x) {
; CHECK-LABEL: NarrowLShri16By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.8h, v0.8h, #5
; CHECK-NEXT:    sqxtn v0.8b, v0.8h
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

define <8 x i8> @NarrowLShrU16By5(<8 x i16> %x) {
; CHECK-LABEL: NarrowLShrU16By5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqrshrn v0.8b, v0.8h, #5
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

define <8 x i8> @NarrowLShri16By5ToU8(<8 x i16> %x) {
; CHECK-LABEL: NarrowLShri16By5ToU8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    urshr v0.8h, v0.8h, #5
; CHECK-NEXT:    sqxtun v0.8b, v0.8h
; CHECK-NEXT:    ret
  %s = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %s)
  ret <8 x i8> %r
}

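; #16 is the largest shift immediate a 32-bit to 16-bit narrowing shift can
; encode, so these should still combine into a single instruction.
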
define <4 x i16> @NarrowAShrI32By31(<4 x i32> %x) {
; CHECK-LABEL: NarrowAShrI32By31:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrn v0.4h, v0.4s, #16
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %x, <4 x i32> <i32 -16, i32 -16, i32 -16, i32 -16>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowAShrI32By31ToU16(<4 x i32> %x) {
; CHECK-LABEL: NarrowAShrI32By31ToU16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqrshrun v0.4h, v0.4s, #16
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %x, <4 x i32> <i32 -16, i32 -16, i32 -16, i32 -16>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

define <4 x i16> @NarrowLShrU32By31(<4 x i32> %x) {
; CHECK-LABEL: NarrowLShrU32By31:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqrshrn v0.4h, v0.4s, #16
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.aarch64.neon.urshl(<4 x i32> %x, <4 x i32> <i32 -16, i32 -16, i32 -16, i32 -16>)
  %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
  ret <4 x i16> %r
}

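; The saturating narrow can also be written as a generic min/max clamp plus a
; trunc; these should select the paired sqrshrn/sqrshrn2 (and friends) forms
; to narrow into a full 128-bit vector.
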
define <16 x i8> @signed_minmax_v8i16_to_v16i8(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: signed_minmax_v8i16_to_v16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrn v0.8b, v0.8h, #5
; CHECK-NEXT:    sqrshrn2 v0.16b, v1.8h, #5
; CHECK-NEXT:    ret
entry:
  %l = call <8 x i16> @llvm.aarch64.neon.srshl(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %h = call <8 x i16> @llvm.aarch64.neon.srshl(<8 x i16> %y, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %s = shufflevector <8 x i16> %l, <8 x i16> %h, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %min = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %s, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>)
  %max = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %min, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>)
  %trunc = trunc <16 x i16> %max to <16 x i8>
  ret <16 x i8> %trunc
}

define <16 x i8> @unsigned_minmax_v8i16_to_v16i8(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: unsigned_minmax_v8i16_to_v16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uqrshrn v0.8b, v0.8h, #5
; CHECK-NEXT:    uqrshrn2 v0.16b, v1.8h, #5
; CHECK-NEXT:    ret
entry:
  %l = call <8 x i16> @llvm.aarch64.neon.urshl(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %h = call <8 x i16> @llvm.aarch64.neon.urshl(<8 x i16> %y, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %s = shufflevector <8 x i16> %l, <8 x i16> %h, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %min = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %s, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>)
  %trunc = trunc <16 x i16> %min to <16 x i8>
  ret <16 x i8> %trunc
}

define <16 x i8> @unsigned_signed_minmax_v8i16_to_v16i8(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: unsigned_signed_minmax_v8i16_to_v16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrun v0.8b, v0.8h, #5
; CHECK-NEXT:    sqrshrun2 v0.16b, v1.8h, #5
; CHECK-NEXT:    ret
entry:
  %l = call <8 x i16> @llvm.aarch64.neon.srshl(<8 x i16> %x, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %h = call <8 x i16> @llvm.aarch64.neon.srshl(<8 x i16> %y, <8 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>)
  %s = shufflevector <8 x i16> %l, <8 x i16> %h, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %max = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %s, <16 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
  %min = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %max, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>)
  %trunc = trunc <16 x i16> %min to <16 x i8>
  ret <16 x i8> %trunc
}

define <8 x i16> @signed_minmax_v4i32_to_v8i16(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: signed_minmax_v4i32_to_v8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrn v0.4h, v0.4s, #5
; CHECK-NEXT:    sqrshrn2 v0.8h, v1.4s, #5
; CHECK-NEXT:    ret
entry:
  %l = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %h = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %y, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %s = shufflevector <4 x i32> %l, <4 x i32> %h, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %min = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %s, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
  %max = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %min, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
  %trunc = trunc <8 x i32> %max to <8 x i16>
  ret <8 x i16> %trunc
}

define <8 x i16> @unsigned_minmax_v4i32_to_v8i16(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: unsigned_minmax_v4i32_to_v8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uqrshrn v0.4h, v0.4s, #5
; CHECK-NEXT:    uqrshrn2 v0.8h, v1.4s, #5
; CHECK-NEXT:    ret
entry:
  %l = call <4 x i32> @llvm.aarch64.neon.urshl(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %h = call <4 x i32> @llvm.aarch64.neon.urshl(<4 x i32> %y, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %s = shufflevector <4 x i32> %l, <4 x i32> %h, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %min = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %s, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
  %trunc = trunc <8 x i32> %min to <8 x i16>
  ret <8 x i16> %trunc
}

define <8 x i16> @unsigned_signed_minmax_v4i32_to_v8i16(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: unsigned_signed_minmax_v4i32_to_v8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrun v0.4h, v0.4s, #5
; CHECK-NEXT:    sqrshrun2 v0.8h, v1.4s, #5
; CHECK-NEXT:    ret
entry:
  %l = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %x, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %h = call <4 x i32> @llvm.aarch64.neon.srshl(<4 x i32> %y, <4 x i32> <i32 -5, i32 -5, i32 -5, i32 -5>)
  %s = shufflevector <4 x i32> %l, <4 x i32> %h, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %max = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %s, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>)
  %min = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %max, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
  %trunc = trunc <8 x i32> %min to <8 x i16>
  ret <8 x i16> %trunc
}

define <4 x i32> @signed_minmax_v4i64_to_v8i32(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: signed_minmax_v4i64_to_v8i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrn v0.2s, v0.2d, #5
; CHECK-NEXT:    sqrshrn2 v0.4s, v1.2d, #5
; CHECK-NEXT:    ret
entry:
  %l = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %h = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %y, <2 x i64> <i64 -5, i64 -5>)
  %s = shufflevector <2 x i64> %l, <2 x i64> %h, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %min = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %s, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
  %max = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %min, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
  %trunc = trunc <4 x i64> %max to <4 x i32>
  ret <4 x i32> %trunc
}

define <4 x i32> @unsigned_minmax_v4i64_to_v8i32(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: unsigned_minmax_v4i64_to_v8i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uqrshrn v0.2s, v0.2d, #5
; CHECK-NEXT:    uqrshrn2 v0.4s, v1.2d, #5
; CHECK-NEXT:    ret
entry:
  %l = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %h = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %y, <2 x i64> <i64 -5, i64 -5>)
  %s = shufflevector <2 x i64> %l, <2 x i64> %h, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %min = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %s, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %trunc = trunc <4 x i64> %min to <4 x i32>
  ret <4 x i32> %trunc
}

define <4 x i32> @unsigned_signed_minmax_v4i64_to_v8i32(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: unsigned_signed_minmax_v4i64_to_v8i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sqrshrun v0.2s, v0.2d, #5
; CHECK-NEXT:    sqrshrun2 v0.4s, v1.2d, #5
; CHECK-NEXT:    ret
entry:
  %l = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %x, <2 x i64> <i64 -5, i64 -5>)
  %h = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %y, <2 x i64> <i64 -5, i64 -5>)
  %s = shufflevector <2 x i64> %l, <2 x i64> %h, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %max = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %s, <4 x i64> <i64 0, i64 0, i64 0, i64 0>)
  %min = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %max, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %trunc = trunc <4 x i64> %min to <4 x i32>
  ret <4 x i32> %trunc
}