; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s
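
; Check that vector integer and floating-point comparisons (icmp/fcmp on NEON
; vector types, sign-extended to an all-ones/all-zeros mask) are lowered to
; the corresponding AArch64 SIMD compare instructions.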

define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmeq8xi8:
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp eq <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmeq16xi8:
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp eq <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmeq4xi16:
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp eq <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmeq8xi16:
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp eq <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmeq2xi32:
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp eq <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmeq4xi32:
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp eq <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmeq2xi64:
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp eq <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}
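
; There is no NEON compare for "not equal": icmp ne is lowered to cmeq
; followed by mvn (bitwise NOT). The mvn operates on the byte arrangement
; (.8b or .16b) regardless of the element size being compared.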

define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmne8xi8:
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmne16xi8:
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmne4xi16:
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmne8xi16:
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmne2xi32:
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmne4xi32:
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmne2xi64:
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmgt8xi8:
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp sgt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmgt16xi8:
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp sgt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmgt4xi16:
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp sgt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmgt8xi16:
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp sgt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmgt2xi32:
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp sgt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmgt4xi32:
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp sgt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmgt2xi64:
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp sgt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmlt8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp slt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmlt16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp slt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmlt4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp slt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmlt8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp slt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmlt2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp slt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmlt4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp slt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmlt2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LT is implemented as GT, so check for reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp slt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmge8xi8:
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp sge <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmge16xi8:
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp sge <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmge4xi16:
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp sge <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmge8xi16:
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp sge <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmge2xi32:
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp sge <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmge4xi32:
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp sge <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmge2xi64:
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp sge <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmle8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp sle <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmle16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp sle <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmle4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp sle <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmle8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp sle <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmle2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp sle <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmle4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp sle <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmle2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LE is implemented as GE, so check for reversed operands.
;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp sle <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmhi8xi8:
;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ugt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmhi16xi8:
;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ugt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmhi4xi16:
;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp ugt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmhi8xi16:
;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp ugt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmhi2xi32:
;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp ugt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmhi4xi32:
;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp ugt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmhi2xi64:
;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp ugt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmlo8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp ult <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmlo16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp ult <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmlo4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp ult <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmlo8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp ult <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmlo2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp ult <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmlo4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp ult <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmlo2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp ult <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmhs8xi8:
;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp uge <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmhs16xi8:
;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp uge <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmhs4xi16:
;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp uge <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmhs8xi16:
;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp uge <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmhs2xi32:
;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp uge <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmhs4xi32:
;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp uge <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmhs2xi64:
;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp uge <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK-LABEL: cmls8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp ule <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK-LABEL: cmls16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp ule <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK-LABEL: cmls4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp ule <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK-LABEL: cmls8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp ule <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK-LABEL: cmls2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp ule <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK-LABEL: cmls4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp ule <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK-LABEL: cmls2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp ule <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}
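
; Comparisons against zeroinitializer can use the immediate #0 forms of the
; signed compare instructions (cmeq/cmge/cmgt/cmle/cmlt), avoiding the need
; to materialize a zero register.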

define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmeqz8xi8:
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
  %tmp3 = icmp eq <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmeqz16xi8:
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
  %tmp3 = icmp eq <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmeqz4xi16:
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
  %tmp3 = icmp eq <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmeqz8xi16:
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
  %tmp3 = icmp eq <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmeqz2xi32:
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
  %tmp3 = icmp eq <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmeqz4xi32:
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
  %tmp3 = icmp eq <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmeqz2xi64:
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
  %tmp3 = icmp eq <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmgez8xi8:
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
  %tmp3 = icmp sge <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmgez16xi8:
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
  %tmp3 = icmp sge <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmgez4xi16:
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
  %tmp3 = icmp sge <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmgez8xi16:
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
  %tmp3 = icmp sge <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmgez2xi32:
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
  %tmp3 = icmp sge <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmgez4xi32:
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
  %tmp3 = icmp sge <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmgez2xi64:
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
  %tmp3 = icmp sge <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmgtz8xi8:
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
  %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmgtz16xi8:
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
  %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmgtz4xi16:
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
  %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmgtz8xi16:
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
  %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmgtz2xi32:
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
  %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmgtz4xi32:
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
  %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmgtz2xi64:
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
  %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmlez8xi8:
;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
  %tmp3 = icmp sle <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmlez16xi8:
;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
  %tmp3 = icmp sle <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmlez4xi16:
;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
  %tmp3 = icmp sle <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmlez8xi16:
;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
  %tmp3 = icmp sle <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmlez2xi32:
;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
  %tmp3 = icmp sle <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmlez4xi32:
;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
  %tmp3 = icmp sle <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmlez2xi64:
;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
  %tmp3 = icmp sle <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmltz8xi8:
;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
  %tmp3 = icmp slt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmltz16xi8:
;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
  %tmp3 = icmp slt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmltz4xi16:
;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
  %tmp3 = icmp slt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmltz8xi16:
;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
  %tmp3 = icmp slt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmltz2xi32:
;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
  %tmp3 = icmp slt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmltz4xi32:
;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
  %tmp3 = icmp slt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmltz2xi64:
;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
  %tmp3 = icmp slt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmneqz8xi8:
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmneqz16xi8:
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmneqz4xi16:
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmneqz8xi16:
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmneqz2xi32:
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmneqz4xi32:
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmneqz2xi64:
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}
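
; The unsigned compares (cmhs/cmhi) have no #0 immediate form, so a zero
; vector is materialized with movi first: a d-register movi for 64-bit
; vectors and a .2d movi for 128-bit vectors.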

define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmhsz8xi8:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
  %tmp3 = icmp uge <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmhsz16xi8:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
  %tmp3 = icmp uge <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmhsz4xi16:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
  %tmp3 = icmp uge <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmhsz8xi16:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
  %tmp3 = icmp uge <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmhsz2xi32:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
  %tmp3 = icmp uge <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmhsz4xi32:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
  %tmp3 = icmp uge <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmhsz2xi64:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
  %tmp3 = icmp uge <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmhiz8xi8:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
  %tmp3 = icmp ugt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmhiz16xi8:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
  %tmp3 = icmp ugt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmhiz4xi16:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
  %tmp3 = icmp ugt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmhiz8xi16:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
  %tmp3 = icmp ugt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmhiz2xi32:
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
  %tmp3 = icmp ugt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmhiz4xi32:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
  %tmp3 = icmp ugt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmhiz2xi64:
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
  %tmp3 = icmp ugt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmlsz8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v[[ZERO]].8b, v0.8b
  %tmp3 = icmp ule <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmlsz16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
  %tmp3 = icmp ule <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmlsz4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
  %tmp3 = icmp ule <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmlsz8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
  %tmp3 = icmp ule <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmlsz2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
  %tmp3 = icmp ule <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmlsz4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
  %tmp3 = icmp ule <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmlsz2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS is implemented as HS, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
  %tmp3 = icmp ule <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
;CHECK-LABEL: cmloz8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v[[ZERO]].8b, v0.8b
  %tmp3 = icmp ult <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
;CHECK-LABEL: cmloz16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
  %tmp3 = icmp ult <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
;CHECK-LABEL: cmloz4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
  %tmp3 = icmp ult <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
;CHECK-LABEL: cmloz8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
  %tmp3 = icmp ult <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
;CHECK-LABEL: cmloz2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi d[[ZERO:[0-9]+]], #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
  %tmp3 = icmp ult <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
;CHECK-LABEL: cmloz4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
  %tmp3 = icmp ult <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
;CHECK-LABEL: cmloz2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO is implemented as HI, so check for reversed operands.
;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
  %tmp3 = icmp ult <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}
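
; <1 x i64> and <1 x double> comparisons against zero use the scalar
; d-register forms of the compare instructions.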

define <1 x i64> @cmeqz_v1i64(<1 x i64> %A) {
; CHECK-LABEL: cmeqz_v1i64:
; CHECK: cmeq d0, d0, #0
  %tst = icmp eq <1 x i64> %A, <i64 0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @cmgez_v1i64(<1 x i64> %A) {
; CHECK-LABEL: cmgez_v1i64:
; CHECK: cmge d0, d0, #0
  %tst = icmp sge <1 x i64> %A, <i64 0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @cmgtz_v1i64(<1 x i64> %A) {
; CHECK-LABEL: cmgtz_v1i64:
; CHECK: cmgt d0, d0, #0
  %tst = icmp sgt <1 x i64> %A, <i64 0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @cmlez_v1i64(<1 x i64> %A) {
; CHECK-LABEL: cmlez_v1i64:
; CHECK: cmle d0, d0, #0
  %tst = icmp sle <1 x i64> %A, <i64 0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @cmltz_v1i64(<1 x i64> %A) {
; CHECK-LABEL: cmltz_v1i64:
; CHECK: cmlt d0, d0, #0
  %tst = icmp slt <1 x i64> %A, <i64 0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @fcmeqz_v1f64(<1 x double> %A) {
; CHECK-LABEL: fcmeqz_v1f64:
; CHECK: fcmeq d0, d0, #0
  %tst = fcmp oeq <1 x double> %A, <double 0.0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @fcmgez_v1f64(<1 x double> %A) {
; CHECK-LABEL: fcmgez_v1f64:
; CHECK: fcmge d0, d0, #0
  %tst = fcmp oge <1 x double> %A, <double 0.0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @fcmgtz_v1f64(<1 x double> %A) {
; CHECK-LABEL: fcmgtz_v1f64:
; CHECK: fcmgt d0, d0, #0
  %tst = fcmp ogt <1 x double> %A, <double 0.0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @fcmlez_v1f64(<1 x double> %A) {
; CHECK-LABEL: fcmlez_v1f64:
; CHECK: fcmle d0, d0, #0
  %tst = fcmp ole <1 x double> %A, <double 0.0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}

define <1 x i64> @fcmltz_v1f64(<1 x double> %A) {
; CHECK-LABEL: fcmltz_v1f64:
; CHECK: fcmlt d0, d0, #0
  %tst = fcmp olt <1 x double> %A, <double 0.0>
  %mask = sext <1 x i1> %tst to <1 x i64>
  ret <1 x i64> %mask
}