; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -verify-machineinstrs -ppc-asm-full-reg-names -mcpu=pwr9 --ppc-enable-pipeliner \
; RUN:   | FileCheck %s
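
; This test exercises the PowerPC machine pipeliner (--ppc-enable-pipeliner)
; on pwr9: the accumulation loop below should be software-pipelined into a
; prologue, a steady-state kernel ending in bdnz, and an epilogue that drains
; the iterations still in flight.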
@x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
@y = dso_local global [1024 x i32] zeroinitializer, align 4
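
; A rough C equivalent of the IR below, for readability (a reconstruction,
; not necessarily the original source; the IR loop is pre-unrolled by 3):
;
;   int x[1024] = {1, 2, 3, 4};
;   int y[1024];
;
;   int *foo(void) {
;     for (int i = 1; i < 1024; i++)
;       y[i] = x[i] * x[i] + y[i - 1];
;     return y;
;   }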
; Function Attrs: norecurse nounwind
define dso_local i32* @foo() local_unnamed_addr #0 {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, x@toc@ha
; CHECK-NEXT:    addi r5, r5, x@toc@l
; CHECK-NEXT:    addis r6, r2, y@toc@ha
; CHECK-NEXT:    li r7, 340
; CHECK-NEXT:    addi r3, r6, y@toc@l
; CHECK-NEXT:    lwz r6, y@toc@l(r6)
; CHECK-NEXT:    mtctr r7
; CHECK-NEXT:    addi r5, r5, -8
; CHECK-NEXT:    lwzu r7, 12(r5)
; CHECK-NEXT:    maddld r6, r7, r7, r6
; CHECK-NEXT:    lwz r7, 4(r5)
; CHECK-NEXT:    addi r4, r3, -8
; CHECK-NEXT:    stwu r6, 12(r4)
; CHECK-NEXT:    maddld r6, r7, r7, r6
; CHECK-NEXT:    lwz r7, 8(r5)
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: # %for.body
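; Kernel: stores of results from earlier pipeline stages are interleaved with
; the loads and multiply-adds of later stages.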
; CHECK:         maddld r7, r7, r7, r6
; CHECK-NEXT:    lwzu r8, 12(r5)
; CHECK-NEXT:    maddld r8, r8, r8, r7
; CHECK-NEXT:    stw r6, 4(r4)
; CHECK-NEXT:    lwz r6, 4(r5)
; CHECK-NEXT:    maddld r6, r6, r6, r8
; CHECK-NEXT:    stw r7, 8(r4)
; CHECK-NEXT:    lwz r7, 8(r5)
; CHECK-NEXT:    stwu r8, 12(r4)
; CHECK-NEXT:    bdnz .LBB0_1
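; Epilogue: compute and store the results still in flight when the CTR runs out.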
; CHECK-NEXT:  # %bb.2:
; CHECK-NEXT:    maddld r5, r7, r7, r6
; CHECK-NEXT:    stw r6, 4(r4)
; CHECK-NEXT:    stw r5, 8(r4)
; CHECK-NEXT:    blr
entry:
  %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)
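
; The source loop was unrolled 3x before pipelining: each for.body iteration
; performs three load/square/accumulate/store groups, chained through the
; running sum (%add -> %add.1 -> %add.2).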
for.body:                                         ; preds = %for.body, %entry
  %0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
  %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %mul = mul nsw i32 %1, %1
  %add = add nsw i32 %mul, %0
  %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
  store i32 %add, i32* %arrayidx6, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
  %2 = load i32, i32* %arrayidx2.1, align 4
  %mul.1 = mul nsw i32 %2, %2
  %add.1 = add nsw i32 %mul.1, %add
  %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
  store i32 %add.1, i32* %arrayidx6.1, align 4
  %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
  %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
  %3 = load i32, i32* %arrayidx2.2, align 4
  %mul.2 = mul nsw i32 %3, %3
  %add.2 = add nsw i32 %mul.2, %add.1
  %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
  store i32 %add.2, i32* %arrayidx6.2, align 4
  %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
  %exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
  br i1 %exitcond.2, label %for.cond.cleanup, label %for.body
}

attributes #0 = { norecurse nounwind }