; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
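; Check that the loop below compiles to halfword vector adds and two
; valign instructions with a #2 byte offset.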
; CHECK: v{{[0-9]+}}.h = vadd(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},#2)
; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},#2)

target triple = "hexagon"

@ZERO = global <16 x i32> zeroinitializer, align 64

define void @fred(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
b4:
  %v5 = bitcast i16* %a0 to <16 x i32>*
  %v6 = getelementptr inbounds i16, i16* %a0, i32 %a1
  %v7 = bitcast i16* %v6 to <16 x i32>*
  %v8 = mul nsw i32 %a1, 2
  %v9 = getelementptr inbounds i16, i16* %a0, i32 %v8
  %v10 = bitcast i16* %v9 to <16 x i32>*
  %v11 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !1
  %v12 = load <16 x i32>, <16 x i32>* %v7, align 64, !tbaa !1
  %v13 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !1
  %v14 = load <16 x i32>, <16 x i32>* @ZERO, align 64, !tbaa !1
  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v14, <16 x i32> %v14)
  %v16 = sdiv i32 %a2, 32
  %v17 = icmp sgt i32 %a2, 31
  br i1 %v17, label %b18, label %b66

b18: ; preds = %b4
  %v19 = add i32 %v8, 32
  %v20 = add i32 %a1, 32
  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> %v12)
  %v22 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v11, <16 x i32> %v13)
  %v23 = getelementptr inbounds i16, i16* %a0, i32 %v19
  %v24 = getelementptr inbounds i16, i16* %a0, i32 %v20
  %v25 = getelementptr inbounds i16, i16* %a0, i32 32
  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v11, <16 x i32> %v13)
  %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v22, <16 x i32> %v21)
  %v28 = bitcast i16* %v23 to <16 x i32>*
  %v29 = bitcast i16* %v24 to <16 x i32>*
  %v30 = bitcast i16* %v25 to <16 x i32>*
  %v31 = bitcast i16* %a3 to <16 x i32>*
  br label %b32

b32: ; preds = %b32, %b18
  %v33 = phi i32 [ 0, %b18 ], [ %v63, %b32 ]
  %v34 = phi <16 x i32>* [ %v31, %b18 ], [ %v62, %b32 ]
  %v35 = phi <16 x i32>* [ %v28, %b18 ], [ %v46, %b32 ]
  %v36 = phi <16 x i32>* [ %v29, %b18 ], [ %v44, %b32 ]
  %v37 = phi <16 x i32>* [ %v30, %b18 ], [ %v42, %b32 ]
  %v38 = phi <16 x i32> [ %v15, %b18 ], [ %v39, %b32 ]
  %v39 = phi <16 x i32> [ %v26, %b18 ], [ %v56, %b32 ]
  %v40 = phi <16 x i32> [ %v27, %b18 ], [ %v51, %b32 ]
  %v41 = phi <16 x i32> [ %v15, %b18 ], [ %v40, %b32 ]
  %v42 = getelementptr inbounds <16 x i32>, <16 x i32>* %v37, i32 1
  %v43 = load <16 x i32>, <16 x i32>* %v37, align 64, !tbaa !1
  %v44 = getelementptr inbounds <16 x i32>, <16 x i32>* %v36, i32 1
  %v45 = load <16 x i32>, <16 x i32>* %v36, align 64, !tbaa !1
  %v46 = getelementptr inbounds <16 x i32>, <16 x i32>* %v35, i32 1
  %v47 = load <16 x i32>, <16 x i32>* %v35, align 64, !tbaa !1
  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v43, <16 x i32> %v47)
  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v45, <16 x i32> %v45)
  %v50 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v40, <16 x i32> %v41, i32 62)
  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v48, <16 x i32> %v49)
  %v52 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v51, <16 x i32> %v40, i32 2)
  %v53 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %v50, <16 x i32> %v52)
  %v54 = getelementptr inbounds <16 x i32>, <16 x i32>* %v34, i32 1
  store <16 x i32> %v53, <16 x i32>* %v34, align 64, !tbaa !1
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v39, <16 x i32> %v38, i32 62)
  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v43, <16 x i32> %v47)
  %v57 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v56, <16 x i32> %v39, i32 2)
  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v39, <16 x i32> %v39)
  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v58, <16 x i32> %v55)
  %v60 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v59, <16 x i32> %v57)
  %v61 = tail call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %v60)
  %v62 = getelementptr inbounds <16 x i32>, <16 x i32>* %v34, i32 2
  store <16 x i32> %v61, <16 x i32>* %v54, align 64, !tbaa !1
  %v63 = add nsw i32 %v33, 1
  %v64 = icmp slt i32 %v63, %v16
  br i1 %v64, label %b32, label %b65

b65: ; preds = %b32
  br label %b66

b66: ; preds = %b65, %b4
  ret void
}

declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
declare <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32>, <16 x i32>) #1
declare <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32>) #1

attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
attributes #1 = { nounwind readnone }

!1 = !{!2, !2, i64 0}
!2 = !{!"omnipotent char", !3, i64 0}
!3 = !{!"Simple C/C++ TBAA"}