; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr9 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P9LE
; RUN: llc -mcpu=pwr9 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:   -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P9BE
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8LE
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:   -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE

declare ppc_fp128 @llvm.fmuladd.ppcf128(ppc_fp128, ppc_fp128, ppc_fp128) #2
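
; Note: the loop in @test_ctr0 is not expected to become a hardware CTR loop,
; since its body calls __gcc_qadd and CTR is volatile across calls. All four
; configurations should instead keep the 1 << 62 trip count in r30 (set up via
; li/rldic) and count it down with addi/cmpldi/bc rather than mtctr/bdnz.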
define ppc_fp128 @test_ctr0() {
; P9LE-LABEL: test_ctr0:
; P9LE:       # %bb.0: # %bb
; P9LE-NEXT:    mflr r0
; P9LE-NEXT:    .cfi_def_cfa_offset 48
; P9LE-NEXT:    .cfi_offset lr, 16
; P9LE-NEXT:    .cfi_offset r30, -16
; P9LE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; P9LE-NEXT:    stdu r1, -48(r1)
; P9LE-NEXT:    li r3, 1
; P9LE-NEXT:    xxlxor f1, f1, f1
; P9LE-NEXT:    xxlxor f2, f2, f2
; P9LE-NEXT:    std r0, 64(r1)
; P9LE-NEXT:    rldic r30, r3, 62, 1
; P9LE-NEXT:    .p2align 5
; P9LE-NEXT:  .LBB0_1: # %bb6
; P9LE-NEXT:    # =>This Inner Loop Header: Depth=1
; P9LE-NEXT:    xxlxor f3, f3, f3
; P9LE-NEXT:    xxlxor f4, f4, f4
; P9LE-NEXT:    bl __gcc_qadd
; P9LE-NEXT:    nop
; P9LE-NEXT:    addi r30, r30, -1
; P9LE-NEXT:    cmpldi r30, 0
; P9LE-NEXT:    bc 12, gt, .LBB0_1
; P9LE-NEXT:  # %bb.2: # %bb14
; P9LE-NEXT:    addi r1, r1, 48
; P9LE-NEXT:    ld r0, 16(r1)
; P9LE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; P9LE-NEXT:    mtlr r0
; P9LE-NEXT:    blr
;
; P9BE-LABEL: test_ctr0:
; P9BE:       # %bb.0: # %bb
; P9BE-NEXT:    mflr r0
; P9BE-NEXT:    stdu r1, -128(r1)
; P9BE-NEXT:    std r0, 144(r1)
; P9BE-NEXT:    .cfi_def_cfa_offset 128
; P9BE-NEXT:    .cfi_offset lr, 16
; P9BE-NEXT:    .cfi_offset r30, -16
; P9BE-NEXT:    li r3, 1
; P9BE-NEXT:    std r30, 112(r1) # 8-byte Folded Spill
; P9BE-NEXT:    xxlxor f1, f1, f1
; P9BE-NEXT:    rldic r30, r3, 62, 1
; P9BE-NEXT:    xxlxor f2, f2, f2
; P9BE-NEXT:    .p2align 5
; P9BE-NEXT:  .LBB0_1: # %bb6
; P9BE-NEXT:    # =>This Inner Loop Header: Depth=1
; P9BE-NEXT:    xxlxor f3, f3, f3
; P9BE-NEXT:    xxlxor f4, f4, f4
; P9BE-NEXT:    bl __gcc_qadd
; P9BE-NEXT:    nop
; P9BE-NEXT:    addi r30, r30, -1
; P9BE-NEXT:    cmpldi r30, 0
; P9BE-NEXT:    bc 12, gt, .LBB0_1
; P9BE-NEXT:  # %bb.2: # %bb14
; P9BE-NEXT:    ld r30, 112(r1) # 8-byte Folded Reload
; P9BE-NEXT:    addi r1, r1, 128
; P9BE-NEXT:    ld r0, 16(r1)
; P9BE-NEXT:    mtlr r0
; P9BE-NEXT:    blr
;
; P8LE-LABEL: test_ctr0:
; P8LE:       # %bb.0: # %bb
; P8LE-NEXT:    mflr r0
; P8LE-NEXT:    .cfi_def_cfa_offset 48
; P8LE-NEXT:    .cfi_offset lr, 16
; P8LE-NEXT:    .cfi_offset r30, -16
; P8LE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; P8LE-NEXT:    stdu r1, -48(r1)
; P8LE-NEXT:    li r3, 1
; P8LE-NEXT:    xxlxor f1, f1, f1
; P8LE-NEXT:    xxlxor f2, f2, f2
; P8LE-NEXT:    std r0, 64(r1)
; P8LE-NEXT:    rldic r30, r3, 62, 1
; P8LE-NEXT:    .p2align 5
; P8LE-NEXT:  .LBB0_1: # %bb6
; P8LE-NEXT:    # =>This Inner Loop Header: Depth=1
; P8LE-NEXT:    xxlxor f3, f3, f3
; P8LE-NEXT:    xxlxor f4, f4, f4
; P8LE-NEXT:    bl __gcc_qadd
; P8LE-NEXT:    nop
; P8LE-NEXT:    addi r30, r30, -1
; P8LE-NEXT:    cmpldi r30, 0
; P8LE-NEXT:    bc 12, gt, .LBB0_1
; P8LE-NEXT:  # %bb.2: # %bb14
; P8LE-NEXT:    addi r1, r1, 48
; P8LE-NEXT:    ld r0, 16(r1)
; P8LE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; P8LE-NEXT:    mtlr r0
; P8LE-NEXT:    blr
;
; P8BE-LABEL: test_ctr0:
; P8BE:       # %bb.0: # %bb
; P8BE-NEXT:    mflr r0
; P8BE-NEXT:    stdu r1, -128(r1)
; P8BE-NEXT:    std r0, 144(r1)
; P8BE-NEXT:    .cfi_def_cfa_offset 128
; P8BE-NEXT:    .cfi_offset lr, 16
; P8BE-NEXT:    .cfi_offset r30, -16
; P8BE-NEXT:    li r3, 1
; P8BE-NEXT:    std r30, 112(r1) # 8-byte Folded Spill
; P8BE-NEXT:    xxlxor f1, f1, f1
; P8BE-NEXT:    xxlxor f2, f2, f2
; P8BE-NEXT:    rldic r30, r3, 62, 1
; P8BE-NEXT:    .p2align 5
; P8BE-NEXT:  .LBB0_1: # %bb6
; P8BE-NEXT:    # =>This Inner Loop Header: Depth=1
; P8BE-NEXT:    xxlxor f3, f3, f3
; P8BE-NEXT:    xxlxor f4, f4, f4
; P8BE-NEXT:    bl __gcc_qadd
; P8BE-NEXT:    nop
; P8BE-NEXT:    addi r30, r30, -1
; P8BE-NEXT:    cmpldi r30, 0
; P8BE-NEXT:    bc 12, gt, .LBB0_1
; P8BE-NEXT:  # %bb.2: # %bb14
; P8BE-NEXT:    ld r30, 112(r1) # 8-byte Folded Reload
; P8BE-NEXT:    addi r1, r1, 128
; P8BE-NEXT:    ld r0, 16(r1)
; P8BE-NEXT:    mtlr r0
; P8BE-NEXT:    blr
bb:
  br label %bb6

bb6:                                              ; preds = %bb6, %bb
  %i = phi ppc_fp128 [ %i8, %bb6 ], [ 0xM00000000000000000000000000000000, %bb ]
  %i7 = phi i64 [ %i9, %bb6 ], [ 0, %bb ]
  %i8 = tail call ppc_fp128 @llvm.fmuladd.ppcf128(ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 %i) #4
  %i9 = add i64 %i7, -4
  %i10 = icmp eq i64 %i9, 0
  br i1 %i10, label %bb14, label %bb6