1 ; RUN: opt < %s -loop-vectorize -S | FileCheck %s
19 target datalayout = "e-m:e-i64:64-n32:64"
20 target triple = "powerpc64le-ibm-linux-gnu"
; Squared 2-norm reduction over an array of 3-component single-precision
; complex vectors:
;   *r = (float) sum_{i=0..n-1} sum_{j=0..2} (re(a[i][j])^2 + im(a[i][j])^2)
; The accumulation is carried in double precision and truncated back to
; float at the end. The inner j=0..2 loop is fully unrolled by hand, so the
; loop body below repeats the same load/square/accumulate pattern three
; times (suffixes .1 and .2). This shape exercises the loop vectorizer's
; handling of interleaved {float,float} (real/imag) accesses plus a
; widening fadd reduction.
; NOTE(review): the `entry:` label and the function epilogue (ret/closing
; brace) lie outside this excerpt; code is left byte-identical.
22 define void @QLA_F3_r_veq_norm2_V(float* noalias nocapture %r, [3 x { float, float }]* noalias nocapture readonly %a, i32 signext %n) #0 {
; Guard: enter the loop only when n > 0; otherwise fall through to
; %for.end13 with a zero sum.
24 %cmp24 = icmp sgt i32 %n, 0
25 br i1 %cmp24, label %for.cond1.preheader.preheader, label %for.end13
27 for.cond1.preheader.preheader: ; preds = %entry
28 br label %for.cond1.preheader
; Outer loop: one iteration per vector index i (%indvars.iv), with the
; running double accumulator carried in %sum.026.
30 for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.cond1.preheader
31 %indvars.iv = phi i64 [ %indvars.iv.next, %for.cond1.preheader ], [ 0, %for.cond1.preheader.preheader ]
32 %sum.026 = phi double [ %add10.2, %for.cond1.preheader ], [ 0.000000e+00, %for.cond1.preheader.preheader ]
; --- component j = 0: |re|^2 + |im|^2 in float, widen, accumulate -------
; NOTE(review): each row is 24 bytes ([3 x {float,float}]), so the imag
; fields sit at byte offset 8*j + 4 within the row; the `align 8` on the
; imag loads (here and in .1/.2 below) cannot hold simultaneously with the
; `align 8` on the real loads — alignment looks overstated; confirm intent.
33 %arrayidx5.realp = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 0, i32 0
34 %arrayidx5.real = load float, float* %arrayidx5.realp, align 8
35 %arrayidx5.imagp = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 0, i32 1
36 %arrayidx5.imag = load float, float* %arrayidx5.imagp, align 8
37 %mul = fmul fast float %arrayidx5.real, %arrayidx5.real
38 %mul9 = fmul fast float %arrayidx5.imag, %arrayidx5.imag
39 %add = fadd fast float %mul9, %mul
40 %conv = fpext float %add to double
41 %add10 = fadd fast double %conv, %sum.026
; --- component j = 1: same pattern, chained onto %add10 ------------------
42 %arrayidx5.realp.1 = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 1, i32 0
43 %arrayidx5.real.1 = load float, float* %arrayidx5.realp.1, align 8
44 %arrayidx5.imagp.1 = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 1, i32 1
45 %arrayidx5.imag.1 = load float, float* %arrayidx5.imagp.1, align 8
46 %mul.1 = fmul fast float %arrayidx5.real.1, %arrayidx5.real.1
47 %mul9.1 = fmul fast float %arrayidx5.imag.1, %arrayidx5.imag.1
48 %add.1 = fadd fast float %mul9.1, %mul.1
49 %conv.1 = fpext float %add.1 to double
50 %add10.1 = fadd fast double %conv.1, %add10
; --- component j = 2: same pattern, chained onto %add10.1 ----------------
51 %arrayidx5.realp.2 = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 2, i32 0
52 %arrayidx5.real.2 = load float, float* %arrayidx5.realp.2, align 8
53 %arrayidx5.imagp.2 = getelementptr inbounds [3 x { float, float }], [3 x { float, float }]* %a, i64 %indvars.iv, i64 2, i32 1
54 %arrayidx5.imag.2 = load float, float* %arrayidx5.imagp.2, align 8
55 %mul.2 = fmul fast float %arrayidx5.real.2, %arrayidx5.real.2
56 %mul9.2 = fmul fast float %arrayidx5.imag.2, %arrayidx5.imag.2
57 %add.2 = fadd fast float %mul9.2, %mul.2
58 %conv.2 = fpext float %add.2 to double
59 %add10.2 = fadd fast double %conv.2, %add10.1
; Advance i; the induction variable is 64-bit but the trip count %n is
; 32-bit, hence the trunc before the exit compare (typical indvar widening).
60 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
61 %lftr.wideiv = trunc i64 %indvars.iv.next to i32
62 %exitcond = icmp eq i32 %lftr.wideiv, %n
63 br i1 %exitcond, label %for.cond.for.end13_crit_edge, label %for.cond1.preheader
; Loop exit (n > 0 path): LCSSA phi for the final accumulator, then narrow
; the double sum back to float for the store.
65 for.cond.for.end13_crit_edge: ; preds = %for.cond1.preheader
66 %add10.2.lcssa = phi double [ %add10.2, %for.cond1.preheader ]
67 %phitmp = fptrunc double %add10.2.lcssa to float
; Merge point: truncated sum from the loop, or 0.0 when n <= 0; store the
; scalar result to *r.
70 for.end13: ; preds = %for.cond.for.end13_crit_edge, %entry
71 %sum.0.lcssa = phi float [ %phitmp, %for.cond.for.end13_crit_edge ], [ 0.000000e+00, %entry ]
72 store float %sum.0.lcssa, float* %r, align 4