1 ; RUN: llc -march=hexagon -O3 < %s
4 ; This used to assert in the register scavenger.
6 target triple = "hexagon-unknown-linux-gnu"
; NOTE(review): types %0, %1 and %6 are referenced below but their definitions
; are missing from this copy (lost in extraction) — restore from the upstream
; test. From usage, %0 appears to be 128 bytes and %6 48 bytes.
; %2: a 4x4 matrix of double (not used by the visible code).
10 %2 = type { [4 x [4 x double]] }
; %3: a 3-component double vector (24 bytes); used for points/bounds.
11 %3 = type { [3 x double] }
; %4: the main object: a %5 base (vptr), two %0 members, a %5* member,
;     and two %3 members (component-wise min and max bounds).
12 %4 = type { %5, %0, %0, %5*, %3, %3 }
; %5: polymorphic base holding only a vtable pointer.
13 %5 = type { i32 (...)** }
; @f0: external helper producing a %3 via sret from a %0 and a %3 input.
16 declare void @f0(%3* sret, %0*, %3*)
18 ; Function Attrs: nounwind
; @f1: copies two %0 matrices (%a1, %a2) into fields 1 and 2 of %a0, makes a
; virtual call (vtable slot 3) that fills a stack %6; on a zero result it
; stores sentinel bounds, otherwise (block b2, reached from the missing b3)
; it runs the 8 points of %v1 through @f0 and folds them into component-wise
; min/max bounds stored into fields 4 and 5 of %a0.
; NOTE(review): this copy is damaged — the `b0:`/`b1:` labels, all of block
; `b3:`, the `ret void` and the closing `}` were lost in extraction; every
; line is also prefixed with its original line number. Restore from the
; upstream LLVM test before compiling. This test exists only to exercise the
; register scavenger at -O3; the exact instruction sequence is load-bearing.
19 define void @f1(%4* %a0, %0* nocapture %a1, %0* nocapture %a2) #0 align 2 {
; --- entry block (original `b0:` label line missing here) ---
21 %v0 = alloca %6, align 8
22 %v1 = alloca [2 x [2 x [2 x %3]]], align 8
23 %v2 = alloca %3, align 8
; Copy 128 bytes from %a1 into field 1 of %a0.
24 %v3 = getelementptr inbounds %4, %4* %a0, i32 0, i32 1
25 %v4 = bitcast %0* %v3 to i8*
26 %v5 = bitcast %0* %a1 to i8*
27 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v4, i8* align 8 %v5, i32 128, i1 false)
; Copy 128 bytes from %a2 into field 2 of %a0.
28 %v6 = getelementptr inbounds %4, %4* %a0, i32 0, i32 2
29 %v7 = bitcast %0* %v6 to i8*
30 %v8 = bitcast %0* %a2 to i8*
31 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v7, i8* align 8 %v8, i32 128, i1 false)
; Zero the 48-byte stack %6 before the virtual call fills it.
32 %v9 = bitcast %6* %v0 to i8*
33 call void @llvm.memset.p0i8.i64(i8* align 8 %v9, i8 0, i64 48, i1 false)
; Virtual dispatch: load the %5* from field 3, read its vtable, call slot 3
; with (this, 0.0, 0.0, %v0).
34 %v10 = getelementptr inbounds %4, %4* %a0, i32 0, i32 3
35 %v11 = load %5*, %5** %v10, align 4, !tbaa !0
36 %v12 = bitcast %5* %v11 to i32 (%5*, double, double, %6*)***
37 %v13 = load i32 (%5*, double, double, %6*)**, i32 (%5*, double, double, %6*)*** %v12, align 4, !tbaa !4
38 %v14 = getelementptr inbounds i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v13, i32 3
39 %v15 = load i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v14, align 4
40 %v16 = call i32 %v15(%5* %v11, double 0.000000e+00, double 0.000000e+00, %6* %v0)
41 %v17 = icmp eq i32 %v16, 0
42 br i1 %v17, label %b1, label %b3
; --- b1 (label line missing): call returned 0 — store sentinel bounds:
; field 4 components = -1.0e6, field 5 components = +1.0e6 ---
45 %v18 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
46 store double -1.000000e+06, double* %v18, align 8, !tbaa !6
47 %v19 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
48 store double -1.000000e+06, double* %v19, align 8, !tbaa !6
49 %v20 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
50 store double -1.000000e+06, double* %v20, align 8, !tbaa !6
51 %v21 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
52 store double 1.000000e+06, double* %v21, align 8, !tbaa !6
53 %v22 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
54 store double 1.000000e+06, double* %v22, align 8, !tbaa !6
55 %v23 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
56 store double 1.000000e+06, double* %v23, align 8, !tbaa !6
; --- b2: build the 8 points in %v1 from the %6 box extents, pushing each
; through @f0, then reduce to component-wise min/max. (Body of b3, a listed
; predecessor, is missing from this copy.) ---
59 b2: ; preds = %b3, %b1
; Zero-initialize the point array (48 + 6*24 = 192 bytes, flattened GEPs).
63 %v24 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0
64 %v25 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
65 %v26 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 2
66 %v27 = bitcast %3* %v26 to i8*
67 %v28 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
68 call void @llvm.memset.p0i8.i64(i8* align 8 %v28, i8 0, i64 48, i1 false)
69 call void @llvm.memset.p0i8.i64(i8* align 8 %v27, i8 0, i64 24, i1 false)
70 %v29 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 3
71 %v30 = bitcast %3* %v29 to i8*
72 call void @llvm.memset.p0i8.i64(i8* align 8 %v30, i8 0, i64 24, i1 false)
73 %v31 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 4
74 %v32 = bitcast %3* %v31 to i8*
75 call void @llvm.memset.p0i8.i64(i8* align 8 %v32, i8 0, i64 24, i1 false)
76 %v33 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 5
77 %v34 = bitcast %3* %v33 to i8*
78 call void @llvm.memset.p0i8.i64(i8* align 8 %v34, i8 0, i64 24, i1 false)
79 %v35 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 6
80 %v36 = bitcast %3* %v35 to i8*
81 call void @llvm.memset.p0i8.i64(i8* align 8 %v36, i8 0, i64 24, i1 false)
82 %v37 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 7
83 %v38 = bitcast %3* %v37 to i8*
84 call void @llvm.memset.p0i8.i64(i8* align 8 %v38, i8 0, i64 24, i1 false)
; Pointers into %v0: %v39/%v40/%v41 = first %3's components,
; %v45/%v44/%v43 = second %3's components; %v42 = scratch sret buffer.
85 %v39 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 0
86 %v40 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 1
87 %v41 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 2
88 %v42 = bitcast %3* %v2 to i8*
89 %v43 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 2
90 %v44 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 1
91 %v45 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 0
; Corner 0: (v39, v40, v41); transform via @f0, copy result back over it.
92 %v46 = load double, double* %v39, align 8, !tbaa !6
93 %v47 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
94 store double %v46, double* %v47, align 8, !tbaa !6
95 %v48 = load double, double* %v40, align 8, !tbaa !6
96 %v49 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
97 store double %v48, double* %v49, align 8, !tbaa !6
98 %v50 = load double, double* %v41, align 8, !tbaa !6
99 %v51 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
100 store double %v50, double* %v51, align 8, !tbaa !6
101 call void @f0(%3* sret %v2, %0* %v3, %3* %v24)
102 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v25, i8* align 8 %v42, i32 24, i1 false)
; Corner 1: (v39, v40, v43).
103 %v52 = load double, double* %v39, align 8, !tbaa !6
104 %v53 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
105 store double %v52, double* %v53, align 8, !tbaa !6
106 %v54 = load double, double* %v40, align 8, !tbaa !6
107 %v55 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
108 store double %v54, double* %v55, align 8, !tbaa !6
109 %v56 = load double, double* %v43, align 8, !tbaa !6
110 %v57 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
111 store double %v56, double* %v57, align 8, !tbaa !6
112 %v58 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1
113 call void @f0(%3* sret %v2, %0* %v3, %3* %v58)
114 %v59 = bitcast %3* %v58 to i8*
115 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v59, i8* align 8 %v42, i32 24, i1 false)
; Corner 2: (v39, v44, v41).
116 %v60 = load double, double* %v39, align 8, !tbaa !6
117 %v61 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
118 store double %v60, double* %v61, align 8, !tbaa !6
119 %v62 = load double, double* %v44, align 8, !tbaa !6
120 %v63 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
121 store double %v62, double* %v63, align 8, !tbaa !6
122 %v64 = load double, double* %v41, align 8, !tbaa !6
123 %v65 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
124 store double %v64, double* %v65, align 8, !tbaa !6
125 %v66 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0
126 call void @f0(%3* sret %v2, %0* %v3, %3* %v66)
127 %v67 = bitcast %3* %v66 to i8*
128 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v67, i8* align 8 %v42, i32 24, i1 false)
; Corner 3: (v39, v44, v43).
129 %v68 = load double, double* %v39, align 8, !tbaa !6
130 %v69 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
131 store double %v68, double* %v69, align 8, !tbaa !6
132 %v70 = load double, double* %v44, align 8, !tbaa !6
133 %v71 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
134 store double %v70, double* %v71, align 8, !tbaa !6
135 %v72 = load double, double* %v43, align 8, !tbaa !6
136 %v73 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
137 store double %v72, double* %v73, align 8, !tbaa !6
138 %v74 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1
139 call void @f0(%3* sret %v2, %0* %v3, %3* %v74)
140 %v75 = bitcast %3* %v74 to i8*
141 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v75, i8* align 8 %v42, i32 24, i1 false)
; Corner 4: (v45, v40, v41).
142 %v76 = load double, double* %v45, align 8, !tbaa !6
143 %v77 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
144 store double %v76, double* %v77, align 8, !tbaa !6
145 %v78 = load double, double* %v40, align 8, !tbaa !6
146 %v79 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
147 store double %v78, double* %v79, align 8, !tbaa !6
148 %v80 = load double, double* %v41, align 8, !tbaa !6
149 %v81 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
150 store double %v80, double* %v81, align 8, !tbaa !6
151 %v82 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0
152 call void @f0(%3* sret %v2, %0* %v3, %3* %v82)
153 %v83 = bitcast %3* %v82 to i8*
154 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v83, i8* align 8 %v42, i32 24, i1 false)
; Corner 5: (v45, v40, v43).
155 %v84 = load double, double* %v45, align 8, !tbaa !6
156 %v85 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
157 store double %v84, double* %v85, align 8, !tbaa !6
158 %v86 = load double, double* %v40, align 8, !tbaa !6
159 %v87 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
160 store double %v86, double* %v87, align 8, !tbaa !6
161 %v88 = load double, double* %v43, align 8, !tbaa !6
162 %v89 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
163 store double %v88, double* %v89, align 8, !tbaa !6
164 %v90 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1
165 call void @f0(%3* sret %v2, %0* %v3, %3* %v90)
166 %v91 = bitcast %3* %v90 to i8*
167 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v91, i8* align 8 %v42, i32 24, i1 false)
; Corner 6: (v45, v44, v41).
168 %v92 = load double, double* %v45, align 8, !tbaa !6
169 %v93 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
170 store double %v92, double* %v93, align 8, !tbaa !6
171 %v94 = load double, double* %v44, align 8, !tbaa !6
172 %v95 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
173 store double %v94, double* %v95, align 8, !tbaa !6
174 %v96 = load double, double* %v41, align 8, !tbaa !6
175 %v97 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
176 store double %v96, double* %v97, align 8, !tbaa !6
177 %v98 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0
178 call void @f0(%3* sret %v2, %0* %v3, %3* %v98)
179 %v99 = bitcast %3* %v98 to i8*
180 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v99, i8* align 8 %v42, i32 24, i1 false)
; Corner 7: (v45, v44, v43).
181 %v100 = load double, double* %v45, align 8, !tbaa !6
182 %v101 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
183 store double %v100, double* %v101, align 8, !tbaa !6
184 %v102 = load double, double* %v44, align 8, !tbaa !6
185 %v103 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
186 store double %v102, double* %v103, align 8, !tbaa !6
187 %v104 = load double, double* %v43, align 8, !tbaa !6
188 %v105 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
189 store double %v104, double* %v105, align 8, !tbaa !6
190 %v106 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1
191 call void @f0(%3* sret %v2, %0* %v3, %3* %v106)
192 %v107 = bitcast %3* %v106 to i8*
193 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v107, i8* align 8 %v42, i32 24, i1 false)
; Reduction: seed with corner 0's components, then fold corners 1..7 with
; fcmp olt/select (running min) and fcmp ogt/select (running max).
194 %v108 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
195 %v109 = load double, double* %v108, align 8, !tbaa !6
196 %v110 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
197 %v111 = load double, double* %v110, align 8, !tbaa !6
198 %v112 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
199 %v113 = load double, double* %v112, align 8, !tbaa !6
200 %v114 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
201 %v115 = load double, double* %v114, align 8, !tbaa !6
202 %v116 = fcmp olt double %v115, %v109
203 %v117 = select i1 %v116, double %v115, double %v109
204 %v118 = fcmp ogt double %v115, %v109
205 %v119 = select i1 %v118, double %v115, double %v109
206 %v120 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
207 %v121 = load double, double* %v120, align 8, !tbaa !6
208 %v122 = fcmp olt double %v121, %v111
209 %v123 = select i1 %v122, double %v121, double %v111
210 %v124 = fcmp ogt double %v121, %v111
211 %v125 = select i1 %v124, double %v121, double %v111
212 %v126 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
213 %v127 = load double, double* %v126, align 8, !tbaa !6
214 %v128 = fcmp olt double %v127, %v113
215 %v129 = select i1 %v128, double %v127, double %v113
216 %v130 = fcmp ogt double %v127, %v113
217 %v131 = select i1 %v130, double %v127, double %v113
218 %v132 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
219 %v133 = load double, double* %v132, align 8, !tbaa !6
220 %v134 = fcmp olt double %v133, %v117
221 %v135 = select i1 %v134, double %v133, double %v117
222 %v136 = fcmp ogt double %v133, %v119
223 %v137 = select i1 %v136, double %v133, double %v119
224 %v138 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
225 %v139 = load double, double* %v138, align 8, !tbaa !6
226 %v140 = fcmp olt double %v139, %v123
227 %v141 = select i1 %v140, double %v139, double %v123
228 %v142 = fcmp ogt double %v139, %v125
229 %v143 = select i1 %v142, double %v139, double %v125
230 %v144 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
231 %v145 = load double, double* %v144, align 8, !tbaa !6
232 %v146 = fcmp olt double %v145, %v129
233 %v147 = select i1 %v146, double %v145, double %v129
234 %v148 = fcmp ogt double %v145, %v131
235 %v149 = select i1 %v148, double %v145, double %v131
236 %v150 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
237 %v151 = load double, double* %v150, align 8, !tbaa !6
238 %v152 = fcmp olt double %v151, %v135
239 %v153 = select i1 %v152, double %v151, double %v135
240 %v154 = fcmp ogt double %v151, %v137
241 %v155 = select i1 %v154, double %v151, double %v137
242 %v156 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
243 %v157 = load double, double* %v156, align 8, !tbaa !6
244 %v158 = fcmp olt double %v157, %v141
245 %v159 = select i1 %v158, double %v157, double %v141
246 %v160 = fcmp ogt double %v157, %v143
247 %v161 = select i1 %v160, double %v157, double %v143
248 %v162 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
249 %v163 = load double, double* %v162, align 8, !tbaa !6
250 %v164 = fcmp olt double %v163, %v147
251 %v165 = select i1 %v164, double %v163, double %v147
252 %v166 = fcmp ogt double %v163, %v149
253 %v167 = select i1 %v166, double %v163, double %v149
254 %v168 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
255 %v169 = load double, double* %v168, align 8, !tbaa !6
256 %v170 = fcmp olt double %v169, %v153
257 %v171 = select i1 %v170, double %v169, double %v153
258 %v172 = fcmp ogt double %v169, %v155
259 %v173 = select i1 %v172, double %v169, double %v155
260 %v174 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
261 %v175 = load double, double* %v174, align 8, !tbaa !6
262 %v176 = fcmp olt double %v175, %v159
263 %v177 = select i1 %v176, double %v175, double %v159
264 %v178 = fcmp ogt double %v175, %v161
265 %v179 = select i1 %v178, double %v175, double %v161
266 %v180 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
267 %v181 = load double, double* %v180, align 8, !tbaa !6
268 %v182 = fcmp olt double %v181, %v165
269 %v183 = select i1 %v182, double %v181, double %v165
270 %v184 = fcmp ogt double %v181, %v167
271 %v185 = select i1 %v184, double %v181, double %v167
272 %v186 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
273 %v187 = load double, double* %v186, align 8, !tbaa !6
274 %v188 = fcmp olt double %v187, %v171
275 %v189 = select i1 %v188, double %v187, double %v171
276 %v190 = fcmp ogt double %v187, %v173
277 %v191 = select i1 %v190, double %v187, double %v173
278 %v192 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
279 %v193 = load double, double* %v192, align 8, !tbaa !6
280 %v194 = fcmp olt double %v193, %v177
281 %v195 = select i1 %v194, double %v193, double %v177
282 %v196 = fcmp ogt double %v193, %v179
283 %v197 = select i1 %v196, double %v193, double %v179
284 %v198 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
285 %v199 = load double, double* %v198, align 8, !tbaa !6
286 %v200 = fcmp olt double %v199, %v183
287 %v201 = select i1 %v200, double %v199, double %v183
288 %v202 = fcmp ogt double %v199, %v185
289 %v203 = select i1 %v202, double %v199, double %v185
290 %v204 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
291 %v205 = load double, double* %v204, align 8, !tbaa !6
292 %v206 = fcmp olt double %v205, %v189
293 %v207 = select i1 %v206, double %v205, double %v189
294 %v208 = fcmp ogt double %v205, %v191
295 %v209 = select i1 %v208, double %v205, double %v191
296 %v210 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
297 %v211 = load double, double* %v210, align 8, !tbaa !6
298 %v212 = fcmp olt double %v211, %v195
299 %v213 = select i1 %v212, double %v211, double %v195
300 %v214 = fcmp ogt double %v211, %v197
301 %v215 = select i1 %v214, double %v211, double %v197
302 %v216 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
303 %v217 = load double, double* %v216, align 8, !tbaa !6
304 %v218 = fcmp olt double %v217, %v201
305 %v219 = select i1 %v218, double %v217, double %v201
306 %v220 = fcmp ogt double %v217, %v203
307 %v221 = select i1 %v220, double %v217, double %v203
308 %v222 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
309 %v223 = load double, double* %v222, align 8, !tbaa !6
310 %v224 = fcmp olt double %v223, %v207
311 %v225 = select i1 %v224, double %v223, double %v207
312 %v226 = fcmp ogt double %v223, %v209
313 %v227 = select i1 %v226, double %v223, double %v209
314 %v228 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
315 %v229 = load double, double* %v228, align 8, !tbaa !6
316 %v230 = fcmp olt double %v229, %v213
317 %v231 = select i1 %v230, double %v229, double %v213
318 %v232 = fcmp ogt double %v229, %v215
319 %v233 = select i1 %v232, double %v229, double %v215
320 %v234 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
321 %v235 = load double, double* %v234, align 8, !tbaa !6
322 %v236 = fcmp olt double %v235, %v219
323 %v237 = select i1 %v236, double %v235, double %v219
324 %v238 = fcmp ogt double %v235, %v221
325 %v239 = select i1 %v238, double %v235, double %v221
; Store results: field 4 = component-wise minima (%v225,%v231,%v237),
; field 5 = component-wise maxima (%v227,%v233,%v239).
; NOTE(review): `ret void` / `}` (and block b3) follow in the original but
; are missing from this copy.
326 %v240 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
327 store double %v225, double* %v240, align 8
328 %v241 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
329 store double %v231, double* %v241, align 8
330 %v242 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
331 store double %v237, double* %v242, align 8
332 %v243 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
333 store double %v227, double* %v243, align 8
334 %v244 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
335 store double %v233, double* %v244, align 8
336 %v245 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
337 store double %v239, double* %v245, align 8
341 ; Function Attrs: argmemonly nounwind
; memcpy intrinsic with i32 length (32-bit pointers on Hexagon).
342 declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
344 ; Function Attrs: argmemonly nounwind
; memset intrinsic with i64 length — note the width differs from the memcpy above.
345 declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
; #0: function attrs for @f1 (targets hexagonv55); #1: intrinsic attrs.
347 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
348 attributes #1 = { argmemonly nounwind }
; TBAA access tags used above: !0 = "any pointer", !4 = "vtable pointer",
; !6 = "double"; all rooted at the "Simple C/C++ TBAA" node (!3) via
; "omnipotent char" (!2).
350 !0 = !{!1, !1, i64 0}
351 !1 = !{!"any pointer", !2}
352 !2 = !{!"omnipotent char", !3}
353 !3 = !{!"Simple C/C++ TBAA"}
354 !4 = !{!5, !5, i64 0}
355 !5 = !{!"vtable pointer", !3}
356 !6 = !{!7, !7, i64 0}
357 !7 = !{!"double", !2}