; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
; RUN: | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
; RUN: -ppc-track-subreg-liveness < %s | FileCheck %s --check-prefix=TRACKLIVE
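; This test checks register allocation of MMA accumulator registers (acc0-acc3),
; which are built from VSX registers via xxmtacc, updated by xvf64gerpp, and
; read back with xxmfacc. The second RUN line additionally enables
; -ppc-track-subreg-liveness so the resulting allocation can be compared when
; subregister liveness tracking is on (TRACKLIVE prefix).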

%0 = type <{ double }>
%1 = type <{ double }>

define void @acc_regalloc(i32* %arg, [0 x %0]* %arg1, [0 x %1]* %arg2) local_unnamed_addr {
; CHECK-LABEL: acc_regalloc:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: lxv v4, 0(0)
; CHECK-NEXT: xxlxor v0, v0, v0
; CHECK-NEXT: xxlxor v1, v1, v1
; CHECK-NEXT: stfd f14, -144(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f15, -136(r1) # 8-byte Folded Spill
; CHECK-NEXT: xxlxor v2, v2, v2
; CHECK-NEXT: li r6, 1
; CHECK-NEXT: li r4, 16
; CHECK-NEXT: extswsli r3, r3, 3
; CHECK-NEXT: xvmaddadp v1, v4, v1
; CHECK-NEXT: lxvdsx v5, 0, r3
; CHECK-NEXT: xvmaddadp v0, v5, v0
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_1: # %bb9
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: addi r6, r6, 2
; CHECK-NEXT: lxv vs1, -64(r5)
; CHECK-NEXT: lxv vs2, -16(r5)
; CHECK-NEXT: lxv vs0, 16(0)
; CHECK-NEXT: vmr v9, v0
; CHECK-NEXT: xxlxor v10, v10, v10
; CHECK-NEXT: xxlxor v7, v7, v7
; CHECK-NEXT: mulld r6, r6, r3
; CHECK-NEXT: xvmaddadp v9, vs1, v2
; CHECK-NEXT: xxlxor v8, v8, v8
; CHECK-NEXT: xvmaddadp v10, vs2, v10
; CHECK-NEXT: xvmaddadp v7, vs0, v5
; CHECK-NEXT: xvmuldp v6, vs0, v2
; CHECK-NEXT: xvmaddadp v7, v2, v2
; CHECK-NEXT: xvmaddadp v6, v2, v2
; CHECK-NEXT: lxvdsx v14, r6, r4
; CHECK-NEXT: xvmaddadp v8, vs1, v8
; CHECK-NEXT: li r6, 0
; CHECK-NEXT: xvmuldp v11, vs2, v14
; CHECK-NEXT: xvmuldp v3, vs1, v14
; CHECK-NEXT: xvmuldp vs5, v14, v2
; CHECK-NEXT: xvmuldp v13, v4, v14
; CHECK-NEXT: vmr v12, v2
; CHECK-NEXT: xxlor vs14, v10, v10
; CHECK-NEXT: xxlor vs0, v2, v2
; CHECK-NEXT: xxlor vs4, v2, v2
; CHECK-NEXT: # kill: def $vsrp2 killed $vsrp2 def $uacc1
; CHECK-NEXT: xxlor vs6, v6, v6
; CHECK-NEXT: xxlor vs7, v7, v7
; CHECK-NEXT: xxlor vs8, v12, v12
; CHECK-NEXT: xxlor vs9, v13, v13
; CHECK-NEXT: vmr v12, v1
; CHECK-NEXT: xxlor vs15, v11, v11
; CHECK-NEXT: vmr v10, v2
; CHECK-NEXT: xxlor vs1, v3, v3
; CHECK-NEXT: xxlor vs2, v8, v8
; CHECK-NEXT: xxlor vs3, v9, v9
; CHECK-NEXT: xxlor vs10, v12, v12
; CHECK-NEXT: xxlor vs11, v13, v13
; CHECK-NEXT: xxmtacc acc1
; CHECK-NEXT: xxlor vs12, v10, v10
; CHECK-NEXT: xxlor vs13, v11, v11
; CHECK-NEXT: xxmtacc acc0
; CHECK-NEXT: xxmtacc acc2
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xxmtacc acc3
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc0, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc1, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc2, vsp34, vs0
; CHECK-NEXT: xvf64gerpp acc3, vsp34, vs0
; CHECK-NEXT: xxmfacc acc0
; CHECK-NEXT: xxmfacc acc1
; CHECK-NEXT: xxmfacc acc2
; CHECK-NEXT: xxmfacc acc3
; CHECK-NEXT: stxv vs1, 0(r3)
; CHECK-NEXT: stxv vs9, 32(r3)
; CHECK-NEXT: stxv vs4, 16(0)
; CHECK-NEXT: stxv vs12, 48(0)
; CHECK-NEXT: b .LBB0_1
;
; TRACKLIVE-LABEL: acc_regalloc:
; TRACKLIVE: # %bb.0: # %bb
; TRACKLIVE-NEXT: lwz r3, 0(r3)
; TRACKLIVE-NEXT: lxv v4, 0(0)
; TRACKLIVE-NEXT: xxlxor v0, v0, v0
; TRACKLIVE-NEXT: xxlxor v1, v1, v1
; TRACKLIVE-NEXT: stfd f14, -144(r1) # 8-byte Folded Spill
; TRACKLIVE-NEXT: stfd f15, -136(r1) # 8-byte Folded Spill
; TRACKLIVE-NEXT: xxlxor v2, v2, v2
; TRACKLIVE-NEXT: li r6, 1
; TRACKLIVE-NEXT: li r4, 16
; TRACKLIVE-NEXT: extswsli r3, r3, 3
; TRACKLIVE-NEXT: xvmaddadp v1, v4, v1
; TRACKLIVE-NEXT: lxvdsx v5, 0, r3
; TRACKLIVE-NEXT: xvmaddadp v0, v5, v0
; TRACKLIVE-NEXT: .p2align 4
; TRACKLIVE-NEXT: .LBB0_1: # %bb9
; TRACKLIVE-NEXT: # =>This Inner Loop Header: Depth=1
; TRACKLIVE-NEXT: addi r6, r6, 2
; TRACKLIVE-NEXT: lxv vs0, 16(0)
; TRACKLIVE-NEXT: xxlxor vs7, vs7, vs7
; TRACKLIVE-NEXT: lxv vs1, -64(r5)
; TRACKLIVE-NEXT: lxv vs4, -16(r5)
; TRACKLIVE-NEXT: xxlxor vs12, vs12, vs12
; TRACKLIVE-NEXT: xxlor vs3, v0, v0
; TRACKLIVE-NEXT: xxlxor vs2, vs2, vs2
; TRACKLIVE-NEXT: mulld r6, r6, r3
; TRACKLIVE-NEXT: xxlor vs10, v2, v2
; TRACKLIVE-NEXT: xxlor vs8, vs10, vs10
; TRACKLIVE-NEXT: xxlor vs10, v1, v1
; TRACKLIVE-NEXT: xvmaddadp vs7, vs0, v5
; TRACKLIVE-NEXT: xvmuldp vs6, vs0, v2
; TRACKLIVE-NEXT: xvmaddadp vs12, vs4, vs12
; TRACKLIVE-NEXT: xvmaddadp vs3, vs1, v2
; TRACKLIVE-NEXT: xvmaddadp vs2, vs1, vs2
; TRACKLIVE-NEXT: xxlor vs0, v2, v2
; TRACKLIVE-NEXT: lxvdsx v6, r6, r4
; TRACKLIVE-NEXT: li r6, 0
; TRACKLIVE-NEXT: xvmaddadp vs7, v2, v2
; TRACKLIVE-NEXT: xvmaddadp vs6, v2, v2
; TRACKLIVE-NEXT: xxlor vs14, vs12, vs12
; TRACKLIVE-NEXT: xxlor vs12, v2, v2
; TRACKLIVE-NEXT: xvmuldp v3, vs1, v6
; TRACKLIVE-NEXT: xvmuldp vs11, v4, v6
; TRACKLIVE-NEXT: xvmuldp vs13, vs4, v6
; TRACKLIVE-NEXT: xvmuldp vs5, v6, v2
; TRACKLIVE-NEXT: xxlor vs4, v2, v2
; TRACKLIVE-NEXT: xxlor vs1, v3, v3
; TRACKLIVE-NEXT: xxlor vs9, vs11, vs11
; TRACKLIVE-NEXT: xxlor vs15, vs13, vs13
; TRACKLIVE-NEXT: xxmtacc acc1
; TRACKLIVE-NEXT: xxmtacc acc0
; TRACKLIVE-NEXT: xxmtacc acc2
; TRACKLIVE-NEXT: xxmtacc acc3
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc0, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc1, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc2, vsp34, vs0
; TRACKLIVE-NEXT: xvf64gerpp acc3, vsp34, vs0
; TRACKLIVE-NEXT: xxmfacc acc0
; TRACKLIVE-NEXT: xxmfacc acc1
; TRACKLIVE-NEXT: xxmfacc acc2
; TRACKLIVE-NEXT: xxmfacc acc3
; TRACKLIVE-NEXT: stxv vs1, 0(r3)
; TRACKLIVE-NEXT: stxv vs9, 32(r3)
; TRACKLIVE-NEXT: stxv vs4, 16(0)
; TRACKLIVE-NEXT: stxv vs12, 48(0)
; TRACKLIVE-NEXT: b .LBB0_1
bb:
  %i = load i32, i32* %arg, align 4
  %i3 = sext i32 %i to i64
  %i4 = shl nsw i64 %i3, 3
  %i5 = bitcast [0 x %0]* %arg1 to i8*
  %i6 = getelementptr i8, i8* %i5, i64 undef
  %i7 = getelementptr [0 x %1], [0 x %1]* %arg2, i64 0, i64 -8
  %i8 = getelementptr i8, i8* %i6, i64 undef
  br label %bb9

bb9: ; preds = %bb95, %bb
  %i10 = phi i64 [ 1, %bb ], [ 0, %bb95 ]
  %i11 = getelementptr %1, %1* null, i64 2
  %i12 = bitcast %1* %i11 to <2 x double>*
  %i13 = load <2 x double>, <2 x double>* %i12, align 1
  %i14 = add nuw nsw i64 %i10, 2
  %i15 = getelementptr inbounds %1, %1* %i7, i64 undef
  %i16 = bitcast %1* %i15 to <2 x double>*
  %i17 = load <2 x double>, <2 x double>* %i16, align 1
  %i18 = load <2 x double>, <2 x double>* null, align 1
  %i19 = getelementptr %1, %1* %i15, i64 6
  %i20 = bitcast %1* %i19 to <2 x double>*
  %i21 = load <2 x double>, <2 x double>* %i20, align 1
  %i22 = load i64, i64* undef, align 8
  %i23 = insertelement <2 x i64> poison, i64 %i22, i32 0
  %i24 = bitcast <2 x i64> %i23 to <2 x double>
  %i25 = shufflevector <2 x double> %i24, <2 x double> undef, <2 x i32> zeroinitializer
  %i26 = mul i64 %i14, %i4
  %i27 = getelementptr i8, i8* null, i64 %i26
  %i28 = getelementptr inbounds i8, i8* %i27, i64 0
  %i29 = getelementptr i8, i8* %i28, i64 16
  %i30 = bitcast i8* %i29 to i64*
  %i31 = load i64, i64* %i30, align 8
  %i32 = insertelement <2 x i64> poison, i64 %i31, i32 0
  %i33 = bitcast <2 x i64> %i32 to <2 x double>
  %i34 = shufflevector <2 x double> %i33, <2 x double> undef, <2 x i32> zeroinitializer
  %i35 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> zeroinitializer, <2 x double> %i25, <2 x double> zeroinitializer)
  %i36 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i13, <2 x double> %i25, <2 x double> zeroinitializer)
  %i37 = fmul contract <2 x double> %i13, zeroinitializer
  %i38 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i17, <2 x double> zeroinitializer, <2 x double> %i35)
  %i39 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> zeroinitializer, <2 x double> zeroinitializer, <2 x double> %i36)
  %i40 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i17, <2 x double> zeroinitializer, <2 x double> zeroinitializer)
  %i41 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> zeroinitializer, <2 x double> zeroinitializer, <2 x double> %i37)
  %i42 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i18, <2 x double> zeroinitializer, <2 x double> zeroinitializer)
  %i43 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i21, <2 x double> zeroinitializer, <2 x double> zeroinitializer)
  %i44 = fmul contract <2 x double> %i17, %i34
  %i45 = fmul contract <2 x double> zeroinitializer, %i34
  %i46 = fmul contract <2 x double> %i18, %i34
  %i47 = fmul contract <2 x double> %i21, %i34
  %i48 = bitcast <2 x double> %i44 to <16 x i8>
  %i49 = bitcast <2 x double> %i40 to <16 x i8>
  %i50 = bitcast <2 x double> %i38 to <16 x i8>
  %i51 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> %i48, <16 x i8> %i49, <16 x i8> %i50)
  %i52 = bitcast <2 x double> %i45 to <16 x i8>
  %i53 = bitcast <2 x double> %i41 to <16 x i8>
  %i54 = bitcast <2 x double> %i39 to <16 x i8>
  %i55 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> %i52, <16 x i8> %i53, <16 x i8> %i54)
  %i56 = bitcast <2 x double> %i46 to <16 x i8>
  %i57 = bitcast <2 x double> %i42 to <16 x i8>
  %i58 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> %i56, <16 x i8> %i57, <16 x i8> %i56)
  %i59 = bitcast <2 x double> %i47 to <16 x i8>
  %i60 = bitcast <2 x double> %i43 to <16 x i8>
  %i61 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> %i59, <16 x i8> %i60, <16 x i8> %i59)
  %i62 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i51, <256 x i1> undef, <16 x i8> undef)
  %i63 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i55, <256 x i1> undef, <16 x i8> undef)
  %i64 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i58, <256 x i1> undef, <16 x i8> undef)
  %i65 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i61, <256 x i1> undef, <16 x i8> undef)
  %i66 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i62, <256 x i1> undef, <16 x i8> undef)
  %i67 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i63, <256 x i1> undef, <16 x i8> undef)
  %i68 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i64, <256 x i1> undef, <16 x i8> undef)
  %i69 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i65, <256 x i1> undef, <16 x i8> undef)
  %i70 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i66, <256 x i1> undef, <16 x i8> undef)
  %i71 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i67, <256 x i1> undef, <16 x i8> undef)
  %i72 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i68, <256 x i1> undef, <16 x i8> undef)
  %i73 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i69, <256 x i1> undef, <16 x i8> undef)
  %i74 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i70, <256 x i1> undef, <16 x i8> undef)
  %i75 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i71, <256 x i1> undef, <16 x i8> undef)
  %i76 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i72, <256 x i1> undef, <16 x i8> undef)
  %i77 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i73, <256 x i1> undef, <16 x i8> undef)
  %i78 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i74, <256 x i1> undef, <16 x i8> undef)
  %i79 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i75, <256 x i1> undef, <16 x i8> undef)
  %i80 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i76, <256 x i1> undef, <16 x i8> undef)
  %i81 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i77, <256 x i1> undef, <16 x i8> undef)
  br label %bb82

bb82: ; preds = %bb82, %bb9
  %i83 = phi <512 x i1> [ %i94, %bb82 ], [ %i81, %bb9 ]
  %i84 = phi <512 x i1> [ %i93, %bb82 ], [ %i80, %bb9 ]
  %i85 = phi <512 x i1> [ %i92, %bb82 ], [ %i79, %bb9 ]
  %i86 = phi <512 x i1> [ %i91, %bb82 ], [ %i78, %bb9 ]
  %i87 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i86, <256 x i1> undef, <16 x i8> undef)
  %i88 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i85, <256 x i1> undef, <16 x i8> undef)
  %i89 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i84, <256 x i1> undef, <16 x i8> undef)
  %i90 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i83, <256 x i1> undef, <16 x i8> undef)
  %i91 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i87, <256 x i1> undef, <16 x i8> undef)
  %i92 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i88, <256 x i1> undef, <16 x i8> undef)
  %i93 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i89, <256 x i1> undef, <16 x i8> undef)
  %i94 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %i90, <256 x i1> undef, <16 x i8> undef)
  br i1 undef, label %bb95, label %bb82

bb95: ; preds = %bb82
  %i96 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i91)
  %i97 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i96, 2
  %i98 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i92)
  %i99 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i98, 3
  %i100 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i93)
  %i101 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i100, 2
  %i102 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i94)
  %i103 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i102, 3
  %i104 = getelementptr inbounds i8, i8* %i8, i64 undef
  %i105 = bitcast i8* %i104 to <16 x i8>*
  store <16 x i8> %i97, <16 x i8>* %i105, align 1
  %i106 = getelementptr i8, i8* %i104, i64 32
  %i107 = bitcast i8* %i106 to <16 x i8>*
  store <16 x i8> %i101, <16 x i8>* %i107, align 1
  %i108 = getelementptr i8, i8* null, i64 16
  %i109 = bitcast i8* %i108 to <16 x i8>*
  store <16 x i8> %i99, <16 x i8>* %i109, align 1
  %i110 = getelementptr i8, i8* null, i64 48
  %i111 = bitcast i8* %i110 to <16 x i8>*
  store <16 x i8> %i103, <16 x i8>* %i111, align 1
  br label %bb9
}

declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
declare <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
declare <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1>, <256 x i1>, <16 x i8>)
declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1>)