; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
; RUN: llc -march=hexagon -O1 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
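
; The patterns below verify that no .cur vector register read appears in the
; output, that two word-to-halfword vasr instructions are emitted, and that a
; plain vector register copy follows them.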
; CHECK-NOT: v{{[0-9]*}}.cur
; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-7]+}})
; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-7]+}})
; CHECK: v{{[0-9]+}} = v{{[0-9]+}}

target triple = "hexagon"

; Function Attrs: nounwind
define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4) #0 {
b0:
  %v0 = bitcast i8* %a1 to i32*
  %v1 = load i32, i32* %v0, align 4, !tbaa !0
  %v2 = getelementptr inbounds i8, i8* %a1, i32 4
  %v3 = bitcast i8* %v2 to i32*
  %v4 = load i32, i32* %v3, align 4, !tbaa !0
  %v5 = getelementptr inbounds i8, i8* %a1, i32 8
  %v6 = bitcast i8* %v5 to i32*
  %v7 = load i32, i32* %v6, align 4, !tbaa !0
  %v8 = shl i32 %a4, 1
  %v9 = add i32 %v8, %a4
  %v10 = icmp sgt i32 %a4, 0
  br i1 %v10, label %b1, label %b4

b1: ; preds = %b0
  %v11 = getelementptr inbounds i8, i8* %a0, i32 %v9
  %v12 = getelementptr inbounds i8, i8* %a0, i32 %v8
  %v13 = getelementptr inbounds i8, i8* %a0, i32 %a4
  %v14 = add i32 %v9, 64
  %v15 = bitcast i8* %v11 to <16 x i32>*
  %v16 = add i32 %v8, 64
  %v17 = bitcast i8* %v12 to <16 x i32>*
  %v18 = add i32 %a4, 64
  %v19 = bitcast i8* %v13 to <16 x i32>*
  %v20 = bitcast i8* %a0 to <16 x i32>*
  %v21 = getelementptr inbounds i8, i8* %a0, i32 %v14
  %v22 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !4
  %v23 = getelementptr inbounds i8, i8* %a0, i32 %v16
  %v24 = load <16 x i32>, <16 x i32>* %v17, align 64, !tbaa !4
  %v25 = getelementptr inbounds i8, i8* %a0, i32 %v18
  %v26 = load <16 x i32>, <16 x i32>* %v19, align 64, !tbaa !4
  %v27 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !4
  %v28 = getelementptr inbounds i8, i8* %a3, i32 %a4
  br label %b2
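
; Main loop: each iteration loads one new 64-byte vector per input row, aligns
; it against the previous iteration's data, and stores two 64-byte result
; vectors.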
b2: ; preds = %b2, %b1
  %v29 = phi i8* [ %a0, %b1 ], [ %v40, %b2 ]
  %v30 = phi i8* [ %a3, %b1 ], [ %v74, %b2 ]
  %v31 = phi i8* [ %v25, %b1 ], [ %v45, %b2 ]
  %v32 = phi i8* [ %v23, %b1 ], [ %v48, %b2 ]
  %v33 = phi i8* [ %v21, %b1 ], [ %v51, %b2 ]
  %v34 = phi i8* [ %v28, %b1 ], [ %v89, %b2 ]
  %v35 = phi i32 [ 0, %b1 ], [ %v90, %b2 ]
  %v36 = phi <16 x i32> [ %v27, %b1 ], [ %v42, %b2 ]
  %v37 = phi <16 x i32> [ %v26, %b1 ], [ %v44, %b2 ]
  %v38 = phi <16 x i32> [ %v24, %b1 ], [ %v47, %b2 ]
  %v39 = phi <16 x i32> [ %v22, %b1 ], [ %v50, %b2 ]
  %v40 = getelementptr inbounds i8, i8* %v29, i32 64
  %v41 = bitcast i8* %v40 to <16 x i32>*
  %v42 = load <16 x i32>, <16 x i32>* %v41, align 64, !tbaa !4
  %v43 = bitcast i8* %v31 to <16 x i32>*
  %v44 = load <16 x i32>, <16 x i32>* %v43, align 64, !tbaa !4
  %v45 = getelementptr inbounds i8, i8* %v31, i32 64
  %v46 = bitcast i8* %v32 to <16 x i32>*
  %v47 = load <16 x i32>, <16 x i32>* %v46, align 64, !tbaa !4
  %v48 = getelementptr inbounds i8, i8* %v32, i32 64
  %v49 = bitcast i8* %v33 to <16 x i32>*
  %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !4
  %v51 = getelementptr inbounds i8, i8* %v33, i32 64
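  ; Align each row across the previous/current vector boundary by 4 bytes,
  ; build vector pairs, and reduce them against the coefficient words %v1,
  ; %v4 and %v7 with vrmpybusi/vrmpybusi.acc.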
  %v52 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v42, <16 x i32> %v36, i32 4)
  %v53 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v44, <16 x i32> %v37, i32 4)
  %v54 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v47, <16 x i32> %v38, i32 4)
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v50, <16 x i32> %v39, i32 4)
  %v56 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v52, <16 x i32> %v36)
  %v57 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v53, <16 x i32> %v37)
  %v58 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v54, <16 x i32> %v38)
  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v55, <16 x i32> %v39)
  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v56, i32 %v1, i32 0)
  %v61 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v56, i32 %v1, i32 1)
  %v62 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v60, <32 x i32> %v57, i32 %v4, i32 0)
  %v63 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v61, <32 x i32> %v57, i32 %v4, i32 1)
  %v64 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v62, <32 x i32> %v58, i32 %v7, i32 0)
  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v63, <32 x i32> %v58, i32 %v7, i32 1)
  %v66 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v65)
  %v67 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v65)
  %v68 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v66, <16 x i32> %v67, i32 %a2)
  %v69 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v64)
  %v70 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v64)
  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v69, <16 x i32> %v70, i32 %a2)
  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v68, <16 x i32> %v71)
  %v73 = bitcast i8* %v30 to <16 x i32>*
  store <16 x i32> %v72, <16 x i32>* %v73, align 64, !tbaa !4
  %v74 = getelementptr inbounds i8, i8* %v30, i32 64
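  ; Second result vector: the same vrmpybusi reduction over %v57, %v58 and
  ; %v59, narrowed with vasrwh/vsathub and stored through %v34.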
  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 0)
  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 1)
  %v77 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v75, <32 x i32> %v58, i32 %v4, i32 0)
  %v78 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v76, <32 x i32> %v58, i32 %v4, i32 1)
  %v79 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v77, <32 x i32> %v59, i32 %v7, i32 0)
  %v80 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v78, <32 x i32> %v59, i32 %v7, i32 1)
  %v81 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v80)
  %v82 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v80)
  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v81, <16 x i32> %v82, i32 %a2)
  %v84 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v79)
  %v85 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v79)
  %v86 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v84, <16 x i32> %v85, i32 %a2)
  %v87 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v83, <16 x i32> %v86)
  %v88 = bitcast i8* %v34 to <16 x i32>*
  store <16 x i32> %v87, <16 x i32>* %v88, align 64, !tbaa !4
  %v89 = getelementptr inbounds i8, i8* %v34, i32 64
  %v90 = add nsw i32 %v35, 64
  %v91 = icmp slt i32 %v90, %a4
  br i1 %v91, label %b2, label %b3

b3: ; preds = %b2
  br label %b4

b4: ; preds = %b3, %b0
  ret void
}

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32>, <32 x i32>, i32, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1

attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
attributes #1 = { nounwind readnone }

!0 = !{!1, !1, i64 0}
!1 = !{!"int", !2, i64 0}
!2 = !{!"omnipotent char", !3, i64 0}
!3 = !{!"Simple C/C++ TBAA"}
!4 = !{!2, !2, i64 0}