1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown | FileCheck %s -check-prefix=PWR9
3 ; RUN: llc < %s -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown | FileCheck %s -check-prefix=PWR8
5 @a = internal global fp128 0xL00000000000000000000000000000000, align 16
6 @x = internal global [4 x fp128] zeroinitializer, align 16
7 @y = internal global [4 x fp128] zeroinitializer, align 16
; Loop test: y[i] = a * x[i] for 4 fp128 elements. Verifies the counted-loop
; (CTR) transformation is still applied to fp128 loops. On Power9 the multiply
; stays in hardware (xsmulqp) and the loop uses bdnz; on Power8 there is no
; quad-precision unit, so each iteration calls the soft-float runtime routine
; __mulkf3 and the trip count is tracked in a GPR instead of the CTR.
; NOTE(review): this excerpt appears to have some original lines elided
; (e.g. the 'entry:' label, trailing blr/ret void and the closing brace);
; confirm against the checked-in test before regenerating assertions.
9 define void @fmul_ctrloop_fp128() nounwind {
10 ; PWR9-LABEL: fmul_ctrloop_fp128:
11 ; PWR9: # %bb.0: # %entry
12 ; PWR9-NEXT: addis 3, 2, a@toc@ha
14 ; PWR9-NEXT: addi 3, 3, a@toc@l
15 ; PWR9-NEXT: lxv 34, 0(3)
16 ; PWR9-NEXT: addis 3, 2, y@toc@ha
18 ; PWR9-NEXT: addis 4, 2, x@toc@ha
19 ; PWR9-NEXT: addi 3, 3, y@toc@l
20 ; PWR9-NEXT: addi 4, 4, x@toc@l
21 ; PWR9-NEXT: addi 3, 3, -16
22 ; PWR9-NEXT: addi 4, 4, -16
23 ; PWR9-NEXT: .p2align 5
24 ; PWR9-NEXT: .LBB0_1: # %for.body
26 ; PWR9-NEXT: lxv 35, 16(4)
27 ; PWR9-NEXT: addi 4, 4, 16
28 ; PWR9-NEXT: xsmulqp 3, 2, 3
29 ; PWR9-NEXT: stxv 35, 16(3)
30 ; PWR9-NEXT: addi 3, 3, 16
31 ; PWR9-NEXT: bdnz .LBB0_1
32 ; PWR9-NEXT: # %bb.2: # %for.end
35 ; PWR8-LABEL: fmul_ctrloop_fp128:
36 ; PWR8: # %bb.0: # %entry
38 ; PWR8-NEXT: stdu 1, -112(1)
40 ; PWR8-NEXT: std 0, 128(1)
41 ; PWR8-NEXT: std 28, 80(1) # 8-byte Folded Spill
42 ; PWR8-NEXT: std 29, 88(1) # 8-byte Folded Spill
43 ; PWR8-NEXT: std 30, 96(1) # 8-byte Folded Spill
45 ; PWR8-NEXT: li 29, 16
46 ; PWR8-NEXT: std 26, 64(1) # 8-byte Folded Spill
47 ; PWR8-NEXT: stxvd2x 63, 1, 3 # 16-byte Folded Spill
48 ; PWR8-NEXT: addis 3, 2, a@toc@ha
49 ; PWR8-NEXT: addi 3, 3, a@toc@l
50 ; PWR8-NEXT: std 27, 72(1) # 8-byte Folded Spill
51 ; PWR8-NEXT: lxvd2x 0, 0, 3
52 ; PWR8-NEXT: addis 3, 2, y@toc@ha
53 ; PWR8-NEXT: addi 3, 3, y@toc@l
54 ; PWR8-NEXT: addi 28, 3, -16
55 ; PWR8-NEXT: addis 3, 2, x@toc@ha
56 ; PWR8-NEXT: addi 3, 3, x@toc@l
57 ; PWR8-NEXT: addi 3, 3, -16
58 ; PWR8-NEXT: xxswapd 63, 0
59 ; PWR8-NEXT: .p2align 4
60 ; PWR8-NEXT: .LBB0_1: # %for.body
62 ; PWR8-NEXT: lxvd2x 0, 3, 29
63 ; PWR8-NEXT: vmr 2, 31
64 ; PWR8-NEXT: addi 27, 28, 16
65 ; PWR8-NEXT: addi 26, 3, 16
66 ; PWR8-NEXT: xxswapd 35, 0
67 ; PWR8-NEXT: bl __mulkf3
69 ; PWR8-NEXT: addi 30, 30, -1
70 ; PWR8-NEXT: xxswapd 0, 34
72 ; PWR8-NEXT: cmpldi 30, 0
73 ; PWR8-NEXT: stxvd2x 0, 28, 29
74 ; PWR8-NEXT: mr 28, 27
75 ; PWR8-NEXT: bc 12, 1, .LBB0_1
76 ; PWR8-NEXT: # %bb.2: # %for.end
78 ; PWR8-NEXT: ld 30, 96(1) # 8-byte Folded Reload
79 ; PWR8-NEXT: ld 29, 88(1) # 8-byte Folded Reload
80 ; PWR8-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
81 ; PWR8-NEXT: ld 28, 80(1) # 8-byte Folded Reload
82 ; PWR8-NEXT: ld 27, 72(1) # 8-byte Folded Reload
83 ; PWR8-NEXT: ld 26, 64(1) # 8-byte Folded Reload
84 ; PWR8-NEXT: addi 1, 1, 112
85 ; PWR8-NEXT: ld 0, 16(1)
; Loop-invariant multiplicand: loaded once before the loop.
89 %0 = load fp128, ptr @a, align 16
; Canonical counted loop: induction variable 0..3, exact trip count 4,
; which is what lets the PPCCTRLoops pass convert it to a CTR loop.
92 for.body: ; preds = %for.body, %entry
93 %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
94 %arrayidx = getelementptr inbounds [4 x fp128], ptr @x, i64 0, i64 %i.06
95 %1 = load fp128, ptr %arrayidx, align 16
96 %mul = fmul fp128 %0, %1
97 %arrayidx1 = getelementptr inbounds [4 x fp128], ptr @y, i64 0, i64 %i.06
98 store fp128 %mul, ptr %arrayidx1, align 16
99 %inc = add nuw nsw i64 %i.06, 1
100 %exitcond = icmp eq i64 %inc, 4
101 br i1 %exitcond, label %for.end, label %for.body
103 for.end: ; preds = %for.body
; Loop test: y[i] = (fp128) a[i] for 4 doubles. Power9 widens in hardware
; (xscvdpqp) inside a bdnz CTR loop; Power8 must call the __extenddfkf2
; runtime routine per iteration, so the trip count lives in a GPR and the
; loop is not CTR-based.
; NOTE(review): excerpt appears to elide some original lines (entry label,
; blr/ret void, closing brace) — confirm before regenerating assertions.
107 define void @fpext_ctrloop_fp128(ptr %a) nounwind {
108 ; PWR9-LABEL: fpext_ctrloop_fp128:
109 ; PWR9: # %bb.0: # %entry
111 ; PWR9-NEXT: addi 3, 3, -8
113 ; PWR9-NEXT: addis 4, 2, y@toc@ha
114 ; PWR9-NEXT: addi 4, 4, y@toc@l
115 ; PWR9-NEXT: addi 4, 4, -16
116 ; PWR9-NEXT: .p2align 5
117 ; PWR9-NEXT: .LBB1_1: # %for.body
119 ; PWR9-NEXT: lfdu 0, 8(3)
120 ; PWR9-NEXT: xscpsgndp 34, 0, 0
121 ; PWR9-NEXT: xscvdpqp 2, 2
122 ; PWR9-NEXT: stxv 34, 16(4)
123 ; PWR9-NEXT: addi 4, 4, 16
124 ; PWR9-NEXT: bdnz .LBB1_1
125 ; PWR9-NEXT: # %bb.2: # %for.end
128 ; PWR8-LABEL: fpext_ctrloop_fp128:
129 ; PWR8: # %bb.0: # %entry
131 ; PWR8-NEXT: std 28, -32(1) # 8-byte Folded Spill
132 ; PWR8-NEXT: std 29, -24(1) # 8-byte Folded Spill
133 ; PWR8-NEXT: std 30, -16(1) # 8-byte Folded Spill
134 ; PWR8-NEXT: stdu 1, -64(1)
135 ; PWR8-NEXT: addi 30, 3, -8
136 ; PWR8-NEXT: addis 3, 2, y@toc@ha
137 ; PWR8-NEXT: li 29, 4
138 ; PWR8-NEXT: std 0, 80(1)
139 ; PWR8-NEXT: addi 3, 3, y@toc@l
140 ; PWR8-NEXT: addi 28, 3, -16
141 ; PWR8-NEXT: .p2align 4
142 ; PWR8-NEXT: .LBB1_1: # %for.body
144 ; PWR8-NEXT: lfdu 1, 8(30)
145 ; PWR8-NEXT: addi 28, 28, 16
146 ; PWR8-NEXT: bl __extenddfkf2
148 ; PWR8-NEXT: addi 29, 29, -1
149 ; PWR8-NEXT: xxswapd 0, 34
150 ; PWR8-NEXT: cmpldi 29, 0
151 ; PWR8-NEXT: stxvd2x 0, 0, 28
152 ; PWR8-NEXT: bc 12, 1, .LBB1_1
153 ; PWR8-NEXT: # %bb.2: # %for.end
154 ; PWR8-NEXT: addi 1, 1, 64
155 ; PWR8-NEXT: ld 0, 16(1)
156 ; PWR8-NEXT: ld 30, -16(1) # 8-byte Folded Reload
157 ; PWR8-NEXT: ld 29, -24(1) # 8-byte Folded Reload
158 ; PWR8-NEXT: ld 28, -32(1) # 8-byte Folded Reload
; Same canonical trip-count-4 loop shape as the fmul test, but the body
; widens double -> fp128 before storing into @y.
165 %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
166 %arrayidx = getelementptr inbounds double, ptr %a, i64 %i.06
167 %0 = load double, ptr %arrayidx, align 8
168 %ext = fpext double %0 to fp128
169 %arrayidx1 = getelementptr inbounds [4 x fp128], ptr @y, i64 0, i64 %i.06
170 store fp128 %ext, ptr %arrayidx1, align 16
171 %inc = add nuw nsw i64 %i.06, 1
172 %exitcond = icmp eq i64 %inc, 4
173 br i1 %exitcond, label %for.end, label %for.body
; Loop test: a[i] = (double) x[i] for 4 fp128 elements. Power9 narrows in
; hardware (xscvqpdp) inside a bdnz CTR loop; Power8 calls the __trunckfdf2
; runtime routine per iteration, so the loop counter is kept in a GPR.
; NOTE(review): excerpt appears to elide some original lines (entry label,
; blr/ret void, closing brace) — confirm before regenerating assertions.
179 define void @fptrunc_ctrloop_fp128(ptr %a) nounwind {
180 ; PWR9-LABEL: fptrunc_ctrloop_fp128:
181 ; PWR9: # %bb.0: # %entry
183 ; PWR9-NEXT: addi 3, 3, -8
185 ; PWR9-NEXT: addis 4, 2, x@toc@ha
186 ; PWR9-NEXT: addi 4, 4, x@toc@l
187 ; PWR9-NEXT: addi 4, 4, -16
188 ; PWR9-NEXT: .p2align 5
189 ; PWR9-NEXT: .LBB2_1: # %for.body
191 ; PWR9-NEXT: lxv 34, 16(4)
192 ; PWR9-NEXT: addi 4, 4, 16
193 ; PWR9-NEXT: xscvqpdp 2, 2
194 ; PWR9-NEXT: xscpsgndp 0, 34, 34
195 ; PWR9-NEXT: stfdu 0, 8(3)
196 ; PWR9-NEXT: bdnz .LBB2_1
197 ; PWR9-NEXT: # %bb.2: # %for.end
200 ; PWR8-LABEL: fptrunc_ctrloop_fp128:
201 ; PWR8: # %bb.0: # %entry
203 ; PWR8-NEXT: std 28, -32(1) # 8-byte Folded Spill
204 ; PWR8-NEXT: std 29, -24(1) # 8-byte Folded Spill
205 ; PWR8-NEXT: std 30, -16(1) # 8-byte Folded Spill
206 ; PWR8-NEXT: stdu 1, -64(1)
207 ; PWR8-NEXT: addi 30, 3, -8
208 ; PWR8-NEXT: addis 3, 2, x@toc@ha
209 ; PWR8-NEXT: li 29, 4
210 ; PWR8-NEXT: std 0, 80(1)
211 ; PWR8-NEXT: addi 3, 3, x@toc@l
212 ; PWR8-NEXT: addi 28, 3, -16
213 ; PWR8-NEXT: .p2align 4
214 ; PWR8-NEXT: .LBB2_1: # %for.body
216 ; PWR8-NEXT: addi 28, 28, 16
217 ; PWR8-NEXT: lxvd2x 0, 0, 28
218 ; PWR8-NEXT: xxswapd 34, 0
219 ; PWR8-NEXT: bl __trunckfdf2
221 ; PWR8-NEXT: addi 29, 29, -1
222 ; PWR8-NEXT: stfdu 1, 8(30)
223 ; PWR8-NEXT: cmpldi 29, 0
224 ; PWR8-NEXT: bc 12, 1, .LBB2_1
225 ; PWR8-NEXT: # %bb.2: # %for.end
226 ; PWR8-NEXT: addi 1, 1, 64
227 ; PWR8-NEXT: ld 0, 16(1)
228 ; PWR8-NEXT: ld 30, -16(1) # 8-byte Folded Reload
229 ; PWR8-NEXT: ld 29, -24(1) # 8-byte Folded Reload
230 ; PWR8-NEXT: ld 28, -32(1) # 8-byte Folded Reload
; Trip-count-4 loop narrowing fp128 -> double into the caller-supplied array.
237 %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
238 %arrayidx = getelementptr inbounds [4 x fp128], ptr @x, i64 0, i64 %i.06
239 %0 = load fp128, ptr %arrayidx, align 16
240 %trunc = fptrunc fp128 %0 to double
241 %arrayidx1 = getelementptr inbounds double, ptr %a, i64 %i.06
; NOTE(review): 'align 16' on a stride-8 double array over-promises
; alignment for odd indices — looks carried over from the fp128 stores;
; verify against the upstream test before changing (it affects codegen).
242 store double %trunc, ptr %arrayidx1, align 16
243 %inc = add nuw nsw i64 %i.06, 1
244 %exitcond = icmp eq i64 %inc, 4
245 br i1 %exitcond, label %for.end, label %for.body
251 declare void @obfuscate(ptr, ...) local_unnamed_addr #2