; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
; RUN:   -check-prefix=CHECK-P8
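
; This file tests code generation for ordered comparisons of fp128 (IEEE quad
; precision) values. With -mcpu=pwr9 the comparisons use the hardware
; xscmpuqp instruction; with -mcpu=pwr8 they are lowered to soft-float
; comparison libcalls (__gtkf2, __ltkf2, __gekf2, __lekf2, __eqkf2, __nekf2).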

@a_qp = common dso_local global fp128 0xL00000000000000000000000000000000, align 16
@b_qp = common dso_local global fp128 0xL00000000000000000000000000000000, align 16

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @greater_qp() {
; CHECK-LABEL: greater_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iselgt r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ogt fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @less_qp() {
; CHECK-LABEL: less_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: isellt r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp olt fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

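; For the ordered >= and <= comparisons a NaN input must yield false, so the
; pwr9 checks expect the unordered bit to be folded into the lt/gt bit with
; cror before the isel.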
; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @greater_eq_qp() {
; CHECK-LABEL: greater_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: cror 4*cr5+lt, un, lt
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp oge fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @less_eq_qp() {
; CHECK-LABEL: less_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: cror 4*cr5+lt, un, gt
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ole fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @equal_qp() {
; CHECK-LABEL: equal_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iseleq r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: equal_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __eqkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cntlzw r3, r3
; CHECK-P8-NEXT: srwi r3, r3, 5
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp oeq fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

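; The next group of functions tests the logically negated comparisons, where
; the fcmp result is xor'ed with true before being zero extended to i32.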
; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @not_greater_qp() {
; CHECK-LABEL: not_greater_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iselgt r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_greater_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ogt fp128 %0, %1
  %lnot = xor i1 %cmp, true
  %lnot.ext = zext i1 %lnot to i32
  ret i32 %lnot.ext
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @not_less_qp() {
; CHECK-LABEL: not_less_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: isellt r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_less_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp olt fp128 %0, %1
  %lnot = xor i1 %cmp, true
  %lnot.ext = zext i1 %lnot to i32
  ret i32 %lnot.ext
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @not_greater_eq_qp() {
; CHECK-LABEL: not_greater_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, lt, un
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_greater_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp oge fp128 %0, %1
  %lnot = xor i1 %cmp, true
  %lnot.ext = zext i1 %lnot to i32
  ret i32 %lnot.ext
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @not_less_eq_qp() {
; CHECK-LABEL: not_less_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, gt, un
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_less_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ole fp128 %0, %1
  %lnot = xor i1 %cmp, true
  %lnot.ext = zext i1 %lnot to i32
  ret i32 %lnot.ext
}

; Function Attrs: noinline nounwind optnone
define dso_local signext i32 @not_equal_qp() {
; CHECK-LABEL: not_equal_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iseleq r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_equal_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __nekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cntlzw r3, r3
; CHECK-P8-NEXT: srwi r3, r3, 5
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp une fp128 %0, %1
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

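; The remaining functions select between the two fp128 values based on the
; comparison; on pwr9 this lowers to a conditional return (bgtlr, bltlr,
; beqlr, bclr) over a vmr copy, and on pwr8 to a compare-and-branch around
; the vmr after the libcall.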
; Function Attrs: norecurse nounwind readonly
define fp128 @greater_sel_qp() {
; CHECK-LABEL: greater_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: bgtlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 0
; CHECK-P8-NEXT: bgt cr0, .LBB10_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB10_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ogt fp128 %0, %1
  %cond = select i1 %cmp, fp128 %0, fp128 %1
  ret fp128 %cond
}

; Function Attrs: noinline nounwind optnone
define fp128 @less_sel_qp() {
; CHECK-LABEL: less_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: bltlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 0
; CHECK-P8-NEXT: blt cr0, .LBB11_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB11_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp olt fp128 %0, %1
  %cond = select i1 %cmp, fp128 %0, fp128 %1
  ret fp128 %cond
}

; Function Attrs: noinline nounwind optnone
define fp128 @greater_eq_sel_qp() {
; CHECK-LABEL: greater_eq_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, un, lt
; CHECK-NEXT: bclr 12, 4*cr5+lt, 0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_eq_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, -1
; CHECK-P8-NEXT: bgt cr0, .LBB12_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB12_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp oge fp128 %0, %1
  %cond = select i1 %cmp, fp128 %0, fp128 %1
  ret fp128 %cond
}

; Function Attrs: noinline nounwind optnone
define fp128 @less_eq_sel_qp() {
; CHECK-LABEL: less_eq_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, un, gt
; CHECK-NEXT: bclr 12, 4*cr5+lt, 0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_eq_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 1
; CHECK-P8-NEXT: blt cr0, .LBB13_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB13_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp ole fp128 %0, %1
  %cond = select i1 %cmp, fp128 %0, fp128 %1
  ret fp128 %cond
}

; Function Attrs: noinline nounwind optnone
define fp128 @equal_sel_qp() {
; CHECK-LABEL: equal_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: beqlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: equal_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __eqkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmplwi r3, 0
; CHECK-P8-NEXT: beq cr0, .LBB14_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB14_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
  %0 = load fp128, fp128* @a_qp, align 16
  %1 = load fp128, fp128* @b_qp, align 16
  %cmp = fcmp oeq fp128 %0, %1
  %cond = select i1 %cmp, fp128 %0, fp128 %1