; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=armv8a--none-eabi -mattr=+fullfp16 -float-abi=hard | FileCheck %s

@varhalf = global half 0.0
@vardouble = global double 0.0
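
; Selects of half values fed by integer and floating-point compares should
; lower to predicated VSEL instructions (vseleq/vselge/vselgt/vselvs) when
; FullFP16 is available. Conditions with no direct VSEL encoding are handled
; by swapping the compare operands and/or the select operands.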
define void @test_vsel32sgt(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sgt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32sge(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sge i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32eq(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32slt(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp slt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32sle(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sle i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ogt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ogt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32oge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32oeq(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oeq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ugt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32uge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32olt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32olt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ult(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ole(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ole:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ule(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ord(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ord:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32une(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32une:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32uno(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uno:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
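
; The same floating-point selects, with the nnan fast-math flag on the compare.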

define void @test_vsel32ogt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ogt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32oge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32oeq_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oeq_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ugt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ugt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32uge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32olt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32olt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ult_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ult_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ole_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ole_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ule_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ule_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32ord_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ord_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32une_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32une_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

define void @test_vsel32uno_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uno_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmpe.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}