1 ; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s
3 define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
4 ;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
5 %tmp3 = icmp eq <8 x i8> %A, %B;
6 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
10 define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
11 ;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
12 %tmp3 = icmp eq <16 x i8> %A, %B;
13 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
17 define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
18 ;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
19 %tmp3 = icmp eq <4 x i16> %A, %B;
20 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
24 define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
25 ;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
26 %tmp3 = icmp eq <8 x i16> %A, %B;
27 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
31 define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
32 ;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
33 %tmp3 = icmp eq <2 x i32> %A, %B;
34 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
38 define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
39 ;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
40 %tmp3 = icmp eq <4 x i32> %A, %B;
41 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
45 define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
46 ;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
47 %tmp3 = icmp eq <2 x i64> %A, %B;
48 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
52 define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
53 ;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
54 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
55 %tmp3 = icmp ne <8 x i8> %A, %B;
56 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
60 define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
61 ;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
62 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
63 %tmp3 = icmp ne <16 x i8> %A, %B;
64 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
68 define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
69 ;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
70 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
71 %tmp3 = icmp ne <4 x i16> %A, %B;
72 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
76 define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
77 ;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
78 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
79 %tmp3 = icmp ne <8 x i16> %A, %B;
80 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
84 define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
85 ;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
86 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
87 %tmp3 = icmp ne <2 x i32> %A, %B;
88 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
92 define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
93 ;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
94 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
95 %tmp3 = icmp ne <4 x i32> %A, %B;
96 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
100 define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
101 ;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
102 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
103 %tmp3 = icmp ne <2 x i64> %A, %B;
104 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
108 define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
109 ;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
110 %tmp3 = icmp sgt <8 x i8> %A, %B;
111 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
115 define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
116 ;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
117 %tmp3 = icmp sgt <16 x i8> %A, %B;
118 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
122 define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
123 ;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
124 %tmp3 = icmp sgt <4 x i16> %A, %B;
125 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
129 define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
130 ;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
131 %tmp3 = icmp sgt <8 x i16> %A, %B;
132 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
136 define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
137 ;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
138 %tmp3 = icmp sgt <2 x i32> %A, %B;
139 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
143 define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
144 ;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
145 %tmp3 = icmp sgt <4 x i32> %A, %B;
146 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
150 define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
151 ;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
152 %tmp3 = icmp sgt <2 x i64> %A, %B;
153 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
157 define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
158 ; Using registers other than v0, v1 is possible, but would be odd.
159 ; LT implemented as GT, so check reversed operands.
160 ;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
161 %tmp3 = icmp slt <8 x i8> %A, %B;
162 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
166 define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
167 ; Using registers other than v0, v1 is possible, but would be odd.
168 ; LT implemented as GT, so check reversed operands.
169 ;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
170 %tmp3 = icmp slt <16 x i8> %A, %B;
171 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
175 define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
176 ; Using registers other than v0, v1 is possible, but would be odd.
177 ; LT implemented as GT, so check reversed operands.
178 ;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
179 %tmp3 = icmp slt <4 x i16> %A, %B;
180 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
184 define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
185 ; Using registers other than v0, v1 is possible, but would be odd.
186 ; LT implemented as GT, so check reversed operands.
187 ;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
188 %tmp3 = icmp slt <8 x i16> %A, %B;
189 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
193 define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
194 ; Using registers other than v0, v1 is possible, but would be odd.
195 ; LT implemented as GT, so check reversed operands.
196 ;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
197 %tmp3 = icmp slt <2 x i32> %A, %B;
198 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
202 define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
203 ; Using registers other than v0, v1 is possible, but would be odd.
204 ; LT implemented as GT, so check reversed operands.
205 ;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
206 %tmp3 = icmp slt <4 x i32> %A, %B;
207 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
211 define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
212 ; Using registers other than v0, v1 is possible, but would be odd.
213 ; LT implemented as GT, so check reversed operands.
214 ;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
215 %tmp3 = icmp slt <2 x i64> %A, %B;
216 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
220 define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
221 ;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
222 %tmp3 = icmp sge <8 x i8> %A, %B;
223 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
227 define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
228 ;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
229 %tmp3 = icmp sge <16 x i8> %A, %B;
230 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
234 define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
235 ;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
236 %tmp3 = icmp sge <4 x i16> %A, %B;
237 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
241 define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
242 ;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
243 %tmp3 = icmp sge <8 x i16> %A, %B;
244 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
248 define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
249 ;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
250 %tmp3 = icmp sge <2 x i32> %A, %B;
251 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
255 define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
256 ;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
257 %tmp3 = icmp sge <4 x i32> %A, %B;
258 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
262 define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
263 ;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
264 %tmp3 = icmp sge <2 x i64> %A, %B;
265 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
269 define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
270 ; Using registers other than v0, v1 is possible, but would be odd.
271 ; LE implemented as GE, so check reversed operands.
272 ;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
273 %tmp3 = icmp sle <8 x i8> %A, %B;
274 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
278 define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
279 ; Using registers other than v0, v1 is possible, but would be odd.
280 ; LE implemented as GE, so check reversed operands.
281 ;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
282 %tmp3 = icmp sle <16 x i8> %A, %B;
283 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
287 define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
288 ; Using registers other than v0, v1 is possible, but would be odd.
289 ; LE implemented as GE, so check reversed operands.
290 ;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
291 %tmp3 = icmp sle <4 x i16> %A, %B;
292 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
296 define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
297 ; Using registers other than v0, v1 is possible, but would be odd.
298 ; LE implemented as GE, so check reversed operands.
299 ;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
300 %tmp3 = icmp sle <8 x i16> %A, %B;
301 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
305 define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
306 ; Using registers other than v0, v1 is possible, but would be odd.
307 ; LE implemented as GE, so check reversed operands.
308 ;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
309 %tmp3 = icmp sle <2 x i32> %A, %B;
310 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
314 define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
315 ; Using registers other than v0, v1 is possible, but would be odd.
316 ; LE implemented as GE, so check reversed operands.
317 ;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
318 %tmp3 = icmp sle <4 x i32> %A, %B;
319 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
323 define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
324 ; Using registers other than v0, v1 is possible, but would be odd.
325 ; LE implemented as GE, so check reversed operands.
326 ;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
327 %tmp3 = icmp sle <2 x i64> %A, %B;
328 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
332 define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
333 ;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
334 %tmp3 = icmp ugt <8 x i8> %A, %B;
335 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
339 define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
340 ;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
341 %tmp3 = icmp ugt <16 x i8> %A, %B;
342 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
346 define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
347 ;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
348 %tmp3 = icmp ugt <4 x i16> %A, %B;
349 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
353 define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
354 ;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
355 %tmp3 = icmp ugt <8 x i16> %A, %B;
356 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
360 define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
361 ;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
362 %tmp3 = icmp ugt <2 x i32> %A, %B;
363 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
367 define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
368 ;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
369 %tmp3 = icmp ugt <4 x i32> %A, %B;
370 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
374 define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
375 ;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
376 %tmp3 = icmp ugt <2 x i64> %A, %B;
377 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
381 define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
382 ; Using registers other than v0, v1 is possible, but would be odd.
383 ; LO implemented as HI, so check reversed operands.
384 ;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
385 %tmp3 = icmp ult <8 x i8> %A, %B;
386 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
390 define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
391 ; Using registers other than v0, v1 is possible, but would be odd.
392 ; LO implemented as HI, so check reversed operands.
393 ;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
394 %tmp3 = icmp ult <16 x i8> %A, %B;
395 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
399 define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
400 ; Using registers other than v0, v1 is possible, but would be odd.
401 ; LO implemented as HI, so check reversed operands.
402 ;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
403 %tmp3 = icmp ult <4 x i16> %A, %B;
404 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
408 define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
409 ; Using registers other than v0, v1 is possible, but would be odd.
410 ; LO implemented as HI, so check reversed operands.
411 ;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
412 %tmp3 = icmp ult <8 x i16> %A, %B;
413 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
417 define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
418 ; Using registers other than v0, v1 is possible, but would be odd.
419 ; LO implemented as HI, so check reversed operands.
420 ;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
421 %tmp3 = icmp ult <2 x i32> %A, %B;
422 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
426 define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
427 ; Using registers other than v0, v1 is possible, but would be odd.
428 ; LO implemented as HI, so check reversed operands.
429 ;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
430 %tmp3 = icmp ult <4 x i32> %A, %B;
431 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
435 define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
436 ; Using registers other than v0, v1 is possible, but would be odd.
437 ; LO implemented as HI, so check reversed operands.
438 ;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
439 %tmp3 = icmp ult <2 x i64> %A, %B;
440 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
444 define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
445 ;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
446 %tmp3 = icmp uge <8 x i8> %A, %B;
447 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
451 define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
452 ;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
453 %tmp3 = icmp uge <16 x i8> %A, %B;
454 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
458 define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
459 ;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
460 %tmp3 = icmp uge <4 x i16> %A, %B;
461 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
465 define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
466 ;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
467 %tmp3 = icmp uge <8 x i16> %A, %B;
468 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
472 define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
473 ;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
474 %tmp3 = icmp uge <2 x i32> %A, %B;
475 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
479 define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
480 ;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
481 %tmp3 = icmp uge <4 x i32> %A, %B;
482 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
486 define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
487 ;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
488 %tmp3 = icmp uge <2 x i64> %A, %B;
489 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
493 define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
494 ; Using registers other than v0, v1 is possible, but would be odd.
495 ; LS implemented as HS, so check reversed operands.
496 ;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
497 %tmp3 = icmp ule <8 x i8> %A, %B;
498 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
502 define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
503 ; Using registers other than v0, v1 is possible, but would be odd.
504 ; LS implemented as HS, so check reversed operands.
505 ;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
506 %tmp3 = icmp ule <16 x i8> %A, %B;
507 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
511 define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
512 ; Using registers other than v0, v1 is possible, but would be odd.
513 ; LS implemented as HS, so check reversed operands.
514 ;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
515 %tmp3 = icmp ule <4 x i16> %A, %B;
516 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
520 define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
521 ; Using registers other than v0, v1 is possible, but would be odd.
522 ; LS implemented as HS, so check reversed operands.
523 ;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
524 %tmp3 = icmp ule <8 x i16> %A, %B;
525 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
529 define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
530 ; Using registers other than v0, v1 is possible, but would be odd.
531 ; LS implemented as HS, so check reversed operands.
532 ;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
533 %tmp3 = icmp ule <2 x i32> %A, %B;
534 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
538 define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
539 ; Using registers other than v0, v1 is possible, but would be odd.
540 ; LS implemented as HS, so check reversed operands.
541 ;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
542 %tmp3 = icmp ule <4 x i32> %A, %B;
543 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
547 define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
548 ; Using registers other than v0, v1 is possible, but would be odd.
549 ; LS implemented as HS, so check reversed operands.
550 ;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
551 %tmp3 = icmp ule <2 x i64> %A, %B;
552 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
557 define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
558 ;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
559 %tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
560 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
564 define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
565 ;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
566 %tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
567 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
571 define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
572 ;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
573 %tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
574 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
578 define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
579 ;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
580 %tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
581 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
585 define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
586 ;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
587 %tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
588 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
592 define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
593 ;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
594 %tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
595 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
599 define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
600 ;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
601 %tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
602 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
607 define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
608 ;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
609 %tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
610 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
614 define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
615 ;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
616 %tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
617 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
621 define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
622 ;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
623 %tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
624 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
628 define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
629 ;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
630 %tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
631 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
635 define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
636 ;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
637 %tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
638 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
642 define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
643 ;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
644 %tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
645 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
649 define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
650 ;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
651 %tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
652 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
657 define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
658 ;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
659 %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
660 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
664 define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
665 ;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
666 %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
667 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
671 define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
672 ;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
673 %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
674 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
678 define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
679 ;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
680 %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
681 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
685 define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
686 ;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
687 %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
688 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
692 define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
693 ;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
694 %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
695 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
699 define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
700 ;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
701 %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
702 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
706 define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
707 ;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
708 %tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
709 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
713 define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
714 ;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
715 %tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
716 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
720 define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
721 ;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
722 %tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
723 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
727 define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
728 ;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
729 %tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
730 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
734 define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
735 ;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
736 %tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
737 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
741 define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
742 ;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
743 %tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
744 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
748 define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
749 ;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
750 %tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
751 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
755 define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
756 ;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
757 %tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
758 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
762 define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
763 ;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
764 %tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
765 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
769 define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
770 ;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
771 %tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
772 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
776 define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
777 ;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
778 %tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
779 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
783 define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
784 ;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
785 %tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
786 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
790 define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
791 ;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
792 %tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
793 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
797 define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
798 ;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
799 %tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
800 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
804 define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
805 ;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
806 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
807 %tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
808 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
812 define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
813 ;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
814 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
815 %tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
816 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
820 define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
821 ;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
822 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
823 %tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
824 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
828 define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
829 ;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
830 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
831 %tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
832 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
836 define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
837 ;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
838 ;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
839 %tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
840 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
844 define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
845 ;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
846 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
847 %tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
848 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
852 define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
853 ;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
854 ;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
855 %tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
856 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
860 define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
861 ;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
862 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
863 %tmp3 = icmp uge <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
864 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
868 define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
869 ;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
870 ;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
871 %tmp3 = icmp uge <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
872 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
876 define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
877 ;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
878 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
879 %tmp3 = icmp uge <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
880 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
884 define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
885 ;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
886 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
887 %tmp3 = icmp uge <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
888 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
892 define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
893 ;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
894 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
895 %tmp3 = icmp uge <2 x i32> %A, <i32 2, i32 2>
896 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
900 define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
901 ;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
902 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
903 %tmp3 = icmp uge <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
904 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
908 define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
909 ;CHECK: mov w[[TWO:[0-9]+]], #2
910 ;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
911 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
912 %tmp3 = icmp uge <2 x i64> %A, <i64 2, i64 2>
913 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
918 define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
919 ;CHECK: movi v[[ZERO:[0-9]+]].8b, #1
920 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
921 %tmp3 = icmp ugt <8 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
922 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
926 define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
927 ;CHECK: movi v[[ZERO:[0-9]+]].16b, #1
928 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
929 %tmp3 = icmp ugt <16 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
930 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
934 define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
935 ;CHECK: movi v[[ZERO:[0-9]+]].4h, #1
936 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
937 %tmp3 = icmp ugt <4 x i16> %A, <i16 1, i16 1, i16 1, i16 1>
938 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
942 define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
943 ;CHECK: movi v[[ZERO:[0-9]+]].8h, #1
944 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
945 %tmp3 = icmp ugt <8 x i16> %A, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
946 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
950 define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
951 ;CHECK: movi v[[ZERO:[0-9]+]].2s, #1
952 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
953 %tmp3 = icmp ugt <2 x i32> %A, <i32 1, i32 1>
954 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
958 define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
959 ;CHECK: movi v[[ZERO:[0-9]+]].4s, #1
960 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
961 %tmp3 = icmp ugt <4 x i32> %A, <i32 1, i32 1, i32 1, i32 1>
962 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
966 define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
967 ;CHECK: mov w[[ONE:[0-9]+]], #1
968 ;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[ONE]]
969 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
970 %tmp3 = icmp ugt <2 x i64> %A, <i64 1, i64 1>
971 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
975 define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
976 ; Using registers other than v0, v1 is possible, but would be odd.
977 ; LS implemented as HS, so check reversed operands.
978 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
979 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v[[ZERO]].8b, v0.8b
980 %tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
981 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
985 define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
986 ; Using registers other than v0, v1 is possible, but would be odd.
987 ; LS implemented as HS, so check reversed operands.
988 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
989 ;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
990 %tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
991 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
995 define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
996 ; Using registers other than v0, v1 is possible, but would be odd.
997 ; LS implemented as HS, so check reversed operands.
998 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
999 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
1000 %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
1001 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
1005 define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
1006 ; Using registers other than v0, v1 is possible, but would be odd.
1007 ; LS implemented as HS, so check reversed operands.
1008 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
1009 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
1010 %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
1011 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
; ule-vs-zero, 2x i32 lanes: CMHS with reversed operands against a MOVI zero.
1015 define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
1016 ; Using registers other than v0, v1 are possible, but would be odd.
1017 ; LS implemented as HS, so check reversed operands.
1018 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
1019 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
1020 %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
1021 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; ule-vs-zero, 4x i32 lanes: CMHS with reversed operands against a MOVI zero.
1025 define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
1026 ; Using registers other than v0, v1 are possible, but would be odd.
1027 ; LS implemented as HS, so check reversed operands.
1028 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
1029 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
1030 %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
1031 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; ule-vs-zero, 2x i64 lanes: CMHS with reversed operands against a MOVI zero.
1035 define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
1036 ; Using registers other than v0, v1 are possible, but would be odd.
1037 ; LS implemented as HS, so check reversed operands.
1038 ;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
1039 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
1040 %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
1041 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; icmp ult (LO) vs. splat(2): the constant is materialized with MOVI #2 and
; compared with CMHI, operands swapped (constant on the left).
; NOTE(review): the capture is named ZERO but holds splat(2).
1045 define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
1046 ; Using registers other than v0, v1 are possible, but would be odd.
1047 ; LO implemented as HI, so check reversed operands.
1048 ;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
1049 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v[[ZERO]].8b, {{v[0-9]+}}.8b
1050 %tmp3 = icmp ult <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
1051 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
; ult vs. splat(2), 16x i8 lanes: MOVI #2 then CMHI with reversed operands.
1055 define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
1056 ; Using registers other than v0, v1 are possible, but would be odd.
1057 ; LO implemented as HI, so check reversed operands.
1058 ;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
1059 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
1060 %tmp3 = icmp ult <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
1061 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
; ult vs. splat(2), 4x i16 lanes: MOVI #2 then CMHI with reversed operands.
1065 define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
1066 ; Using registers other than v0, v1 are possible, but would be odd.
1067 ; LO implemented as HI, so check reversed operands.
1068 ;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
1069 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
1070 %tmp3 = icmp ult <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
1071 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
; ult vs. splat(2), 8x i16 lanes: MOVI #2 then CMHI with reversed operands.
1075 define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
1076 ; Using registers other than v0, v1 are possible, but would be odd.
1077 ; LO implemented as HI, so check reversed operands.
1078 ;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
1079 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
1080 %tmp3 = icmp ult <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
1081 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
; ult vs. splat(2), 2x i32 lanes: MOVI #2 then CMHI with reversed operands.
1085 define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
1086 ; Using registers other than v0, v1 are possible, but would be odd.
1087 ; LO implemented as HI, so check reversed operands.
1088 ;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
1089 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
1090 %tmp3 = icmp ult <2 x i32> %A, <i32 2, i32 2>
1091 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; ult vs. splat(2), 4x i32 lanes: MOVI #2 then CMHI with reversed operands.
1095 define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
1096 ; Using registers other than v0, v1 are possible, but would be odd.
1097 ; LO implemented as HI, so check reversed operands.
1098 ;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
1099 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
1100 %tmp3 = icmp ult <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
1101 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; ult vs. splat(2), 2x i64 lanes: like cmhiz2xi64, the 64-bit constant is built
; via GPR mov + DUP (w[[TWO]]/x[[TWO]] capture the same register number), then
; CMHI with reversed operands.
1105 define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
1106 ; Using registers other than v0, v1 are possible, but would be odd.
1107 ; LO implemented as HI, so check reversed operands.
1108 ;CHECK: mov w[[TWO:[0-9]+]], #2
1109 ;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
1110 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
1111 %tmp3 = icmp ult <2 x i64> %A, <i64 2, i64 2>
1112 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; <1 x i64> eq-vs-zero: expect the scalar d-register form, CMEQ d, d, #0.
1116 define <1 x i64> @cmeqz_v1i64(<1 x i64> %A) {
1117 ; CHECK-LABEL: cmeqz_v1i64:
1118 ; CHECK: cmeq d0, d0, #0
1119 %tst = icmp eq <1 x i64> %A, <i64 0>
1120 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x i64> sge-vs-zero: expect the scalar form, CMGE d, d, #0.
1124 define <1 x i64> @cmgez_v1i64(<1 x i64> %A) {
1125 ; CHECK-LABEL: cmgez_v1i64:
1126 ; CHECK: cmge d0, d0, #0
1127 %tst = icmp sge <1 x i64> %A, <i64 0>
1128 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x i64> sgt-vs-zero: expect the scalar form, CMGT d, d, #0.
1132 define <1 x i64> @cmgtz_v1i64(<1 x i64> %A) {
1133 ; CHECK-LABEL: cmgtz_v1i64:
1134 ; CHECK: cmgt d0, d0, #0
1135 %tst = icmp sgt <1 x i64> %A, <i64 0>
1136 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x i64> sle-vs-zero: expect the scalar form, CMLE d, d, #0.
1140 define <1 x i64> @cmlez_v1i64(<1 x i64> %A) {
1141 ; CHECK-LABEL: cmlez_v1i64:
1142 ; CHECK: cmle d0, d0, #0
1143 %tst = icmp sle <1 x i64> %A, <i64 0>
1144 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x i64> slt-vs-zero: expect the scalar form, CMLT d, d, #0.
1148 define <1 x i64> @cmltz_v1i64(<1 x i64> %A) {
1149 ; CHECK-LABEL: cmltz_v1i64:
1150 ; CHECK: cmlt d0, d0, #0
1151 %tst = icmp slt <1 x i64> %A, <1 x i64>
1152 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x double> oeq-vs-+0.0: expect the scalar FP form, FCMEQ d, d, #0.
1156 define <1 x i64> @fcmeqz_v1f64(<1 x double> %A) {
1157 ; CHECK-LABEL: fcmeqz_v1f64:
1158 ; CHECK: fcmeq d0, d0, #0
1159 %tst = fcmp oeq <1 x double> %A, <double 0.0>
1160 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x double> oge-vs-+0.0: expect the scalar FP form, FCMGE d, d, #0.
1164 define <1 x i64> @fcmgez_v1f64(<1 x double> %A) {
1165 ; CHECK-LABEL: fcmgez_v1f64:
1166 ; CHECK: fcmge d0, d0, #0
1167 %tst = fcmp oge <1 x double> %A, <double 0.0>
1168 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x double> ogt-vs-+0.0: expect the scalar FP form, FCMGT d, d, #0.
1172 define <1 x i64> @fcmgtz_v1f64(<1 x double> %A) {
1173 ; CHECK-LABEL: fcmgtz_v1f64:
1174 ; CHECK: fcmgt d0, d0, #0
1175 %tst = fcmp ogt <1 x double> %A, <double 0.0>
1176 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x double> ole-vs-+0.0: expect the scalar FP form, FCMLE d, d, #0.
1180 define <1 x i64> @fcmlez_v1f64(<1 x double> %A) {
1181 ; CHECK-LABEL: fcmlez_v1f64:
1182 ; CHECK: fcmle d0, d0, #0
1183 %tst = fcmp ole <1 x double> %A, <double 0.0>
1184 %mask = sext <1 x i1> %tst to <1 x i64>
; <1 x double> olt-vs-+0.0: expect the scalar FP form, FCMLT d, d, #0.
1188 define <1 x i64> @fcmltz_v1f64(<1 x double> %A) {
1189 ; CHECK-LABEL: fcmltz_v1f64:
1190 ; CHECK: fcmlt d0, d0, #0
1191 %tst = fcmp olt <1 x double> %A, <double 0.0>
1192 %mask = sext <1 x i1> %tst to <1 x i64>