1 ; RUN: llc -march=hexagon -mcpu=hexagonv60 -O2 -disable-post-ra < %s | FileCheck %s
3 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
4 ; CHECK: q{{[0-3]}} |= vand(v{{[0-9]*}},r{{[0-9]*}})
5 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
6 ; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
7 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
8 ; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
9 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
10 ; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
11 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
12 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
13 ; CHECK: q{{[0-3]}} &= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
14 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
15 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
16 ; CHECK: q{{[0-3]}} &= vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
17 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
18 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
19 ; CHECK: q{{[0-3]}} &= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
20 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
21 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
22 ; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
23 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
24 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
25 ; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
26 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
27 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
28 ; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
29 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
30 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
31 ; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
32 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
33 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
34 ; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
35 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
36 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
37 ; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
38 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
39 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
40 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
41 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
42 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
43 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
44 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
45 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
46 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
47 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw)
48 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
49 ; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
50 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
51 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
52 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
53 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
54 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
55 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
56 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
57 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
58 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
59 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
60 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
61 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
62 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
63 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
64 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw)
65 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
66 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
67 ; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
68 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
69 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
70 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
71 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
72 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
73 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
74 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
75 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
76 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
77 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
78 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
79 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
80 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
81 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
82 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw)
83 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
84 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
85 ; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
86 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
87 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
88 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
89 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
90 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
91 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
92 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
93 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
94 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
95 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
96 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
97 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
98 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
99 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
100 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw)
101 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
102 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
103 ; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
104 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
105 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
106 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
107 ; CHECK: q{{[0-3]}} = xor{{[0-9]*}}(q{{[0-3]}},q{{[0-3]}})
108 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
109 ; CHECK: v{{[0-9]*}} = v
110 ; CHECK: v{{[0-9]*}} = valign(v{{[0-9]*}},v{{[0-9]*}},#1)
111 ; CHECK: v{{[0-9]*}} = valign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
112 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
113 ; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}})
114 ; CHECK: v{{[0-9]*}} = vand(v{{[0-9]*}},v{{[0-9]*}})
115 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
116 ; CHECK: v{{[0-9]*}} |= vand(q{{[0-3]}},r{{[0-9]*}})
117 ; CHECK: v{{[0-9]*}} = vdelta(v{{[0-9]*}},v{{[0-9]*}})
118 ; CHECK: v{{[0-9]*}} = vlalign(v{{[0-9]*}},v{{[0-9]*}},#1)
119 ; CHECK: v{{[0-9]*}} = vlalign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
120 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
121 ; CHECK: v{{[0-9]*}} = vmux(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}})
122 ; CHECK: v{{[0-9]*}} = vnot(v{{[0-9]*}})
123 ; CHECK: v{{[0-9]*}} = vor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}})
124 ; CHECK: v{{[0-9]*}} = vr{{[0-9]*}}delta(v{{[0-9]*}},v{{[0-9]*}})
125 ; CHECK: v{{[0-9]*}} = vr{{[0-9]*}}or{{[0-9]*}}(v{{[0-9]*}},r{{[0-9]*}})
126 ; CHECK: v{{[0-9]*}} = vxor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}})
127 ; CHECK: v{{[0-9]*}}.b = vadd(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
128 ; CHECK: v{{[0-9]*}}.b = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
129 ; CHECK: v{{[0-9]*}}.b = vdeal(v{{[0-9]*}}.b)
130 ; CHECK: v{{[0-9]*}}.b = vdeale(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
131 ; CHECK: v{{[0-9]*}}.b = vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}})
132 ; CHECK: v{{[0-9]*}}.b |= vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}})
133 ; CHECK: v{{[0-9]*}}.b = vnav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
134 ; CHECK: v{{[0-9]*}}.b = vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
135 ; CHECK: v{{[0-9]*}}.b = vpacke(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
136 ; CHECK: v{{[0-9]*}}.b = vpacko(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
137 ; CHECK: v{{[0-9]*}}.b = vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
138 ; CHECK: v{{[0-9]*}}.b = vshuff(v{{[0-9]*}}.b)
139 ; CHECK: v{{[0-9]*}}.b = vshuffe(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
140 ; CHECK: v{{[0-9]*}}.b = vshuffo(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
141 ; CHECK: v{{[0-9]*}}.b = vsub(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
142 ; CHECK: v{{[0-9]*}}.h = vabs(v{{[0-9]*}}.h)
143 ; CHECK: v{{[0-9]*}}.h = vabs(v{{[0-9]*}}.h):{{[0-9]*}}sat
144 ; CHECK: v{{[0-9]*}}.h = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
145 ; CHECK: v{{[0-9]*}}.h = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
146 ; CHECK: v{{[0-9]*}}.h = vasl(v{{[0-9]*}}.h,r{{[0-9]*}})
147 ; CHECK: v{{[0-9]*}}.h = vasl(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
148 ; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.h,r{{[0-9]*}})
149 ; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
150 ; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}})
151 ; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
152 ; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat
153 ; CHECK: v{{[0-9]*}}.h = vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
154 ; CHECK: v{{[0-9]*}}.h = vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}r{{[0-9]*}}nd
155 ; CHECK: v{{[0-9]*}}.h = vdeal(v{{[0-9]*}}.h)
156 ; CHECK: v{{[0-9]*}}.h = vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
157 ; CHECK: v{{[0-9]*}}.h += vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
158 ; CHECK: v{{[0-9]*}}.h = vlsr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
159 ; CHECK: v{{[0-9]*}}.h = vmax(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
160 ; CHECK: v{{[0-9]*}}.h = vmin(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
161 ; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
162 ; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat
163 ; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
164 ; CHECK: v{{[0-9]*}}.h = vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b)
165 ; CHECK: v{{[0-9]*}}.h = vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
166 ; CHECK: v{{[0-9]*}}.h += vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b)
167 ; CHECK: v{{[0-9]*}}.h += vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
168 ; CHECK: v{{[0-9]*}}.h = vnav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
169 ; CHECK: v{{[0-9]*}}.h = vnor{{[0-9]*}}mamt(v{{[0-9]*}}.h)
170 ; CHECK: v{{[0-9]*}}.h = vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
171 ; CHECK: v{{[0-9]*}}.h = vpacke(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
172 ; CHECK: v{{[0-9]*}}.h = vpacko(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
173 ; CHECK: v{{[0-9]*}}.h = vpopcount(v{{[0-9]*}}.h)
174 ; CHECK: v{{[0-9]*}}.h = vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
175 ; CHECK: v{{[0-9]*}}.h = vsat(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
176 ; CHECK: v{{[0-9]*}}.h = vshuff(v{{[0-9]*}}.h)
177 ; CHECK: v{{[0-9]*}}.h = vshuffe(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
178 ; CHECK: v{{[0-9]*}}.h = vshuffo(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
179 ; CHECK: v{{[0-9]*}}.h = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
180 ; CHECK: v{{[0-9]*}}.h = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
181 ; CHECK: v{{[0-9]*}}.ub = vabsdiff(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
182 ; CHECK: v{{[0-9]*}}.ub = vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat
183 ; CHECK: v{{[0-9]*}}.ub = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
184 ; CHECK: v{{[0-9]*}}.ub = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}sat
185 ; CHECK: v{{[0-9]*}}.ub = vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
186 ; CHECK: v{{[0-9]*}}.ub = vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}r{{[0-9]*}}nd
187 ; CHECK: v{{[0-9]*}}.ub = vmax(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
188 ; CHECK: v{{[0-9]*}}.ub = vmin(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
189 ; CHECK: v{{[0-9]*}}.ub = vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
190 ; CHECK: v{{[0-9]*}}.ub = vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
191 ; CHECK: v{{[0-9]*}}.ub = vsat(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
192 ; CHECK: v{{[0-9]*}}.ub = vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat
193 ; CHECK: v{{[0-9]*}}.uh = vabsdiff(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
194 ; CHECK: v{{[0-9]*}}.uh = vabsdiff(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
195 ; CHECK: v{{[0-9]*}}.uh = vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat
196 ; CHECK: v{{[0-9]*}}.uh = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat
197 ; CHECK: v{{[0-9]*}}.uh = vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
198 ; CHECK: v{{[0-9]*}}.uh = vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}r{{[0-9]*}}nd
199 ; CHECK: v{{[0-9]*}}.uh = vcl0(v{{[0-9]*}}.uh)
200 ; CHECK: v{{[0-9]*}}.uh = vlsr{{[0-9]*}}(v{{[0-9]*}}.uh,r{{[0-9]*}})
201 ; CHECK: v{{[0-9]*}}.uh = vmax(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
202 ; CHECK: v{{[0-9]*}}.uh = vmin(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
203 ; CHECK: v{{[0-9]*}}.uh = vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
204 ; CHECK: v{{[0-9]*}}.uh = vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
205 ; CHECK: v{{[0-9]*}}.uh = vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat
206 ; CHECK: v{{[0-9]*}}.uw = vabsdiff(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
207 ; CHECK: v{{[0-9]*}}.uw = vcl0(v{{[0-9]*}}.uw)
208 ; CHECK: v{{[0-9]*}}.uw = vlsr{{[0-9]*}}(v{{[0-9]*}}.uw,r{{[0-9]*}})
209 ; CHECK: v{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub)
210 ; CHECK: v{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
211 ; CHECK: v{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub)
212 ; CHECK: v{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
213 ; CHECK: v{{[0-9]*}}.w = vabs(v{{[0-9]*}}.w)
214 ; CHECK: v{{[0-9]*}}.w = vabs(v{{[0-9]*}}.w):{{[0-9]*}}sat
215 ; CHECK: v{{[0-9]*}}.w = vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
216 ; CHECK: v{{[0-9]*}}.w = vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
217 ; CHECK: v{{[0-9]*}}.w = vasl(v{{[0-9]*}}.w,r{{[0-9]*}})
218 ; CHECK: v{{[0-9]*}}.w = vasl(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
219 ; CHECK: v{{[0-9]*}}.w += vasl(v{{[0-9]*}}.w,r{{[0-9]*}})
220 ; CHECK: v{{[0-9]*}}.w = vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}})
221 ; CHECK: v{{[0-9]*}}.w = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
222 ; CHECK: v{{[0-9]*}}.w += vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}})
223 ; CHECK: v{{[0-9]*}}.w = vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
224 ; CHECK: v{{[0-9]*}}.w = vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}r{{[0-9]*}}nd
225 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b)
226 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat
227 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat
228 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
229 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat
230 ; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat
231 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b)
232 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat
233 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat
234 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat
235 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat
236 ; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat
237 ; CHECK: v{{[0-9]*}}.w = vinser{{[0-9]*}}t(r{{[0-9]*}})
238 ; CHECK: v{{[0-9]*}}.w = vinser{{[0-9]*}}t(r{{[0-9]*}})
239 ; CHECK: v{{[0-9]*}}.w = vinser{{[0-9]*}}t(r{{[0-9]*}})
240 ; CHECK: v{{[0-9]*}}.w = vlsr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
241 ; CHECK: v{{[0-9]*}}.w = vmax(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
242 ; CHECK: v{{[0-9]*}}.w = vmin(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
243 ; CHECK: v{{[0-9]*}}.w = vmpye(v{{[0-9]*}}.w,v{{[0-9]*}}.uh)
244 ; CHECK: v{{[0-9]*}}.w = vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b)
245 ; CHECK: v{{[0-9]*}}.w = vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h)
246 ; CHECK: v{{[0-9]*}}.w += vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b)
247 ; CHECK: v{{[0-9]*}}.w += vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h)
248 ; CHECK: v{{[0-9]*}}.w = vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh)
249 ; CHECK: v{{[0-9]*}}.w += vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.h)
250 ; CHECK: v{{[0-9]*}}.w += vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh)
251 ; CHECK: v{{[0-9]*}}.w = vmpyieo(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
252 ; CHECK: v{{[0-9]*}}.w = vmpyio(v{{[0-9]*}}.w,v{{[0-9]*}}.h)
253 ; CHECK: v{{[0-9]*}}.w = vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat
254 ; CHECK: v{{[0-9]*}}.w = vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat
255 ; CHECK: v{{[0-9]*}}.w += vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat:{{[0-9]*}}shift
256 ; CHECK: v{{[0-9]*}}.w += vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat:{{[0-9]*}}shift
257 ; CHECK: v{{[0-9]*}}.w = vnav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
258 ; CHECK: v{{[0-9]*}}.w = vnor{{[0-9]*}}mamt(v{{[0-9]*}}.w)
259 ; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
260 ; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
261 ; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b)
262 ; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
263 ; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
264 ; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b)
265 ; CHECK: v{{[0-9]*}}.w = vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w)
266 ; CHECK: v{{[0-9]*}}.w = vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat
267 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vcombine(v{{[0-9]*}},v{{[0-9]*}})
268 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vdeal(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
269 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
270 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
271 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}})
272 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
273 ; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vswap(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}})
274 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vadd(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b)
275 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vshuffoe(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
276 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vsub(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b)
277 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
278 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h)
279 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat
280 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
281 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
282 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}})
283 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h |= vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}})
284 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
285 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.b)
286 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub)
287 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
288 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
289 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
290 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b)
291 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b)
292 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b)
293 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b)
294 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vshuffoe(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
295 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
296 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h)
297 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat
298 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsxt(v{{[0-9]*}}.b)
299 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b)
300 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
301 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b)
302 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b)
303 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vunpack(v{{[0-9]*}}.b)
304 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h |= vunpacko(v{{[0-9]*}}.b)
305 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub = vadd(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat
306 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub = vsub(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat
307 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vadd(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat
308 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub)
309 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
310 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh += vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub)
311 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh += vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub)
312 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vsub(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat
313 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vunpack(v{{[0-9]*}}.ub)
314 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vzxt(v{{[0-9]*}}.ub)
315 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh)
316 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh)
317 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh)
318 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
319 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh)
320 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
321 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0)
322 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0)
323 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0)
324 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0)
325 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vunpack(v{{[0-9]*}}.uh)
326 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vzxt(v{{[0-9]*}}.uh)
327 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
328 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
329 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w)
330 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat
331 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
332 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
333 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
334 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
335 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h)
336 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
337 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh)
338 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat
339 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
340 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh)
341 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0)
342 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0)
343 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h)
344 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh)
345 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w)
346 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat
347 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsxt(v{{[0-9]*}}.h)
348 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
349 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
350 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vunpack(v{{[0-9]*}}.h)
351 ; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w |= vunpacko(v{{[0-9]*}}.h)
; Hexagon 32-bit data layout; HVX 64-byte (v64:64:64) vectors map to <16 x i32>.
352 target datalayout = "e-m:e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32"
353 target triple = "hexagon"
; Scalar globals; presumably used as scalar operands later in @main — usage not
; visible in this chunk, so their exact role is unconfirmed here.
355 @K = global i64 0, align 8
356 @src = global i8 -1, align 1
; Source array for predicate tests: @main volatile-loads <16 x i32> elements from
; here and converts them to <64 x i1> predicates via llvm.hexagon.V6.vandvrt.
357 @vecpreds = common global [15 x <16 x i32>] zeroinitializer, align 64
; Result sink for predicate ops: predicates are widened back to <16 x i32> with
; llvm.hexagon.V6.vandqrt and volatile-stored here so the ops are not DCE'd.
358 @Q6VecPredResult = common global <16 x i32> zeroinitializer, align 64
; Source array of plain HVX vector operands (e.g. inputs to veqb/veqh/veqw).
359 @vectors = common global [15 x <16 x i32>] zeroinitializer, align 64
; Result sink for single-vector-result intrinsics (use not shown in this chunk).
360 @VectorResult = common global <16 x i32> zeroinitializer, align 64
; Vector-pair (<32 x i32>, 128-byte) operand array and result sink for the
; double-register intrinsics checked above (uses occur past this chunk).
361 @vector_pairs = common global [15 x <32 x i32>] zeroinitializer, align 128
362 @VectorPairResult = common global <32 x i32> zeroinitializer, align 128
; Scratch address/byte buffers; presumably targets for the scatter/insert-style
; checks — TODO confirm against the remainder of @main, which is not visible here.
363 @dst_addresses = common global [15 x i8] zeroinitializer, align 8
364 @ptr_addresses = common global [15 x ptr] zeroinitializer, align 8
365 @src_addresses = common global [15 x ptr] zeroinitializer, align 8
366 @dst = common global i8 0, align 1
367 @ptr = common global [32768 x i8] zeroinitializer, align 8
369 ; Function Attrs: nounwind
370 define i32 @main() #0 {
372 %retval = alloca i32, align 4
373 store i32 0, ptr %retval, align 4
374 %0 = load volatile <16 x i32>, ptr @vecpreds, align 64
375 %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %0, i32 -1)
376 %2 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
377 %3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %2, i32 -1)
378 %4 = call <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1> %1, <64 x i1> %3)
379 %5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %4, i32 -1)
380 store volatile <16 x i32> %5, ptr @Q6VecPredResult, align 64
381 %6 = load volatile <16 x i32>, ptr @vecpreds, align 64
382 %7 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %6, i32 -1)
383 %8 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
384 %9 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %8, i32 -1)
385 %10 = call <64 x i1> @llvm.hexagon.V6.pred.and.n(<64 x i1> %7, <64 x i1> %9)
386 %11 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %10, i32 -1)
387 store volatile <16 x i32> %11, ptr @Q6VecPredResult, align 64
388 %12 = load volatile <16 x i32>, ptr @vecpreds, align 64
389 %13 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %12, i32 -1)
390 %14 = call <64 x i1> @llvm.hexagon.V6.pred.not(<64 x i1> %13)
391 %15 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %14, i32 -1)
392 store volatile <16 x i32> %15, ptr @Q6VecPredResult, align 64
393 %16 = load volatile <16 x i32>, ptr @vecpreds, align 64
394 %17 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %16, i32 -1)
395 %18 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
396 %19 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %18, i32 -1)
397 %20 = call <64 x i1> @llvm.hexagon.V6.pred.or(<64 x i1> %17, <64 x i1> %19)
398 %21 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %20, i32 -1)
399 store volatile <16 x i32> %21, ptr @Q6VecPredResult, align 64
400 %22 = load volatile <16 x i32>, ptr @vecpreds, align 64
401 %23 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %22, i32 -1)
402 %24 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
403 %25 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %24, i32 -1)
404 %26 = call <64 x i1> @llvm.hexagon.V6.pred.or.n(<64 x i1> %23, <64 x i1> %25)
405 %27 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %26, i32 -1)
406 store volatile <16 x i32> %27, ptr @Q6VecPredResult, align 64
407 %28 = load volatile <16 x i32>, ptr @vectors, align 64
408 %29 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %28, i32 -1)
409 %30 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %29, i32 -1)
410 store volatile <16 x i32> %30, ptr @Q6VecPredResult, align 64
411 %31 = load volatile <16 x i32>, ptr @vecpreds, align 64
412 %32 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %31, i32 -1)
413 %33 = load volatile <16 x i32>, ptr @vectors, align 64
414 %34 = call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %32, <16 x i32> %33, i32 -1)
415 %35 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %34, i32 -1)
416 store volatile <16 x i32> %35, ptr @Q6VecPredResult, align 64
417 %36 = load volatile <16 x i32>, ptr @vectors, align 64
418 %37 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
419 %38 = call <64 x i1> @llvm.hexagon.V6.veqb(<16 x i32> %36, <16 x i32> %37)
420 %39 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %38, i32 -1)
421 store volatile <16 x i32> %39, ptr @Q6VecPredResult, align 64
422 %40 = load volatile <16 x i32>, ptr @vectors, align 64
423 %41 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
424 %42 = call <64 x i1> @llvm.hexagon.V6.veqh(<16 x i32> %40, <16 x i32> %41)
425 %43 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %42, i32 -1)
426 store volatile <16 x i32> %43, ptr @Q6VecPredResult, align 64
427 %44 = load volatile <16 x i32>, ptr @vectors, align 64
428 %45 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
429 %46 = call <64 x i1> @llvm.hexagon.V6.veqw(<16 x i32> %44, <16 x i32> %45)
430 %47 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %46, i32 -1)
431 store volatile <16 x i32> %47, ptr @Q6VecPredResult, align 64
432 %48 = load volatile <16 x i32>, ptr @vecpreds, align 64
433 %49 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %48, i32 -1)
434 %50 = load volatile <16 x i32>, ptr @vectors, align 64
435 %51 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
436 %52 = call <64 x i1> @llvm.hexagon.V6.veqb.and(<64 x i1> %49, <16 x i32> %50, <16 x i32> %51)
437 %53 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %52, i32 -1)
438 store volatile <16 x i32> %53, ptr @Q6VecPredResult, align 64
439 %54 = load volatile <16 x i32>, ptr @vecpreds, align 64
440 %55 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %54, i32 -1)
441 %56 = load volatile <16 x i32>, ptr @vectors, align 64
442 %57 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
443 %58 = call <64 x i1> @llvm.hexagon.V6.veqh.and(<64 x i1> %55, <16 x i32> %56, <16 x i32> %57)
444 %59 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %58, i32 -1)
445 store volatile <16 x i32> %59, ptr @Q6VecPredResult, align 64
446 %60 = load volatile <16 x i32>, ptr @vecpreds, align 64
447 %61 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %60, i32 -1)
448 %62 = load volatile <16 x i32>, ptr @vectors, align 64
449 %63 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
450 %64 = call <64 x i1> @llvm.hexagon.V6.veqw.and(<64 x i1> %61, <16 x i32> %62, <16 x i32> %63)
451 %65 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %64, i32 -1)
452 store volatile <16 x i32> %65, ptr @Q6VecPredResult, align 64
453 %66 = load volatile <16 x i32>, ptr @vecpreds, align 64
454 %67 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %66, i32 -1)
455 %68 = load volatile <16 x i32>, ptr @vectors, align 64
456 %69 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
457 %70 = call <64 x i1> @llvm.hexagon.V6.veqb.or(<64 x i1> %67, <16 x i32> %68, <16 x i32> %69)
458 %71 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %70, i32 -1)
459 store volatile <16 x i32> %71, ptr @Q6VecPredResult, align 64
460 %72 = load volatile <16 x i32>, ptr @vecpreds, align 64
461 %73 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %72, i32 -1)
462 %74 = load volatile <16 x i32>, ptr @vectors, align 64
463 %75 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
464 %76 = call <64 x i1> @llvm.hexagon.V6.veqh.or(<64 x i1> %73, <16 x i32> %74, <16 x i32> %75)
465 %77 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %76, i32 -1)
466 store volatile <16 x i32> %77, ptr @Q6VecPredResult, align 64
467 %78 = load volatile <16 x i32>, ptr @vecpreds, align 64
468 %79 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %78, i32 -1)
469 %80 = load volatile <16 x i32>, ptr @vectors, align 64
470 %81 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
471 %82 = call <64 x i1> @llvm.hexagon.V6.veqw.or(<64 x i1> %79, <16 x i32> %80, <16 x i32> %81)
472 %83 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %82, i32 -1)
473 store volatile <16 x i32> %83, ptr @Q6VecPredResult, align 64
474 %84 = load volatile <16 x i32>, ptr @vecpreds, align 64
475 %85 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %84, i32 -1)
476 %86 = load volatile <16 x i32>, ptr @vectors, align 64
477 %87 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
478 %88 = call <64 x i1> @llvm.hexagon.V6.veqb.xor(<64 x i1> %85, <16 x i32> %86, <16 x i32> %87)
479 %89 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %88, i32 -1)
480 store volatile <16 x i32> %89, ptr @Q6VecPredResult, align 64
481 %90 = load volatile <16 x i32>, ptr @vecpreds, align 64
482 %91 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %90, i32 -1)
483 %92 = load volatile <16 x i32>, ptr @vectors, align 64
484 %93 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
485 %94 = call <64 x i1> @llvm.hexagon.V6.veqh.xor(<64 x i1> %91, <16 x i32> %92, <16 x i32> %93)
486 %95 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %94, i32 -1)
487 store volatile <16 x i32> %95, ptr @Q6VecPredResult, align 64
488 %96 = load volatile <16 x i32>, ptr @vecpreds, align 64
489 %97 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %96, i32 -1)
490 %98 = load volatile <16 x i32>, ptr @vectors, align 64
491 %99 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
492 %100 = call <64 x i1> @llvm.hexagon.V6.veqw.xor(<64 x i1> %97, <16 x i32> %98, <16 x i32> %99)
493 %101 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %100, i32 -1)
494 store volatile <16 x i32> %101, ptr @Q6VecPredResult, align 64
495 %102 = load volatile <16 x i32>, ptr @vectors, align 64
496 %103 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
497 %104 = call <64 x i1> @llvm.hexagon.V6.vgtb(<16 x i32> %102, <16 x i32> %103)
498 %105 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %104, i32 -1)
499 store volatile <16 x i32> %105, ptr @Q6VecPredResult, align 64
500 %106 = load volatile <16 x i32>, ptr @vectors, align 64
501 %107 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
502 %108 = call <64 x i1> @llvm.hexagon.V6.vgth(<16 x i32> %106, <16 x i32> %107)
503 %109 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %108, i32 -1)
504 store volatile <16 x i32> %109, ptr @Q6VecPredResult, align 64
505 %110 = load volatile <16 x i32>, ptr @vectors, align 64
506 %111 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
507 %112 = call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %110, <16 x i32> %111)
508 %113 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %112, i32 -1)
509 store volatile <16 x i32> %113, ptr @Q6VecPredResult, align 64
510 %114 = load volatile <16 x i32>, ptr @vectors, align 64
511 %115 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
512 %116 = call <64 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32> %114, <16 x i32> %115)
513 %117 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %116, i32 -1)
514 store volatile <16 x i32> %117, ptr @Q6VecPredResult, align 64
515 %118 = load volatile <16 x i32>, ptr @vectors, align 64
516 %119 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
517 %120 = call <64 x i1> @llvm.hexagon.V6.vgtuw(<16 x i32> %118, <16 x i32> %119)
518 %121 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %120, i32 -1)
519 store volatile <16 x i32> %121, ptr @Q6VecPredResult, align 64
520 %122 = load volatile <16 x i32>, ptr @vectors, align 64
521 %123 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
522 %124 = call <64 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %122, <16 x i32> %123)
523 %125 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %124, i32 -1)
524 store volatile <16 x i32> %125, ptr @Q6VecPredResult, align 64
525 %126 = load volatile <16 x i32>, ptr @vecpreds, align 64
526 %127 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %126, i32 -1)
527 %128 = load volatile <16 x i32>, ptr @vectors, align 64
528 %129 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
529 %130 = call <64 x i1> @llvm.hexagon.V6.vgtb.and(<64 x i1> %127, <16 x i32> %128, <16 x i32> %129)
530 %131 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %130, i32 -1)
531 store volatile <16 x i32> %131, ptr @Q6VecPredResult, align 64
532 %132 = load volatile <16 x i32>, ptr @vecpreds, align 64
533 %133 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %132, i32 -1)
534 %134 = load volatile <16 x i32>, ptr @vectors, align 64
535 %135 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
536 %136 = call <64 x i1> @llvm.hexagon.V6.vgth.and(<64 x i1> %133, <16 x i32> %134, <16 x i32> %135)
537 %137 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %136, i32 -1)
538 store volatile <16 x i32> %137, ptr @Q6VecPredResult, align 64
539 %138 = load volatile <16 x i32>, ptr @vecpreds, align 64
540 %139 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %138, i32 -1)
541 %140 = load volatile <16 x i32>, ptr @vectors, align 64
542 %141 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
543 %142 = call <64 x i1> @llvm.hexagon.V6.vgtub.and(<64 x i1> %139, <16 x i32> %140, <16 x i32> %141)
544 %143 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %142, i32 -1)
545 store volatile <16 x i32> %143, ptr @Q6VecPredResult, align 64
546 %144 = load volatile <16 x i32>, ptr @vecpreds, align 64
547 %145 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %144, i32 -1)
548 %146 = load volatile <16 x i32>, ptr @vectors, align 64
549 %147 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
550 %148 = call <64 x i1> @llvm.hexagon.V6.vgtuh.and(<64 x i1> %145, <16 x i32> %146, <16 x i32> %147)
551 %149 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %148, i32 -1)
552 store volatile <16 x i32> %149, ptr @Q6VecPredResult, align 64
553 %150 = load volatile <16 x i32>, ptr @vecpreds, align 64
554 %151 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %150, i32 -1)
555 %152 = load volatile <16 x i32>, ptr @vectors, align 64
556 %153 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
557 %154 = call <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1> %151, <16 x i32> %152, <16 x i32> %153)
558 %155 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %154, i32 -1)
559 store volatile <16 x i32> %155, ptr @Q6VecPredResult, align 64
560 %156 = load volatile <16 x i32>, ptr @vecpreds, align 64
561 %157 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %156, i32 -1)
562 %158 = load volatile <16 x i32>, ptr @vectors, align 64
563 %159 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
564 %160 = call <64 x i1> @llvm.hexagon.V6.vgtw.and(<64 x i1> %157, <16 x i32> %158, <16 x i32> %159)
565 %161 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %160, i32 -1)
566 store volatile <16 x i32> %161, ptr @Q6VecPredResult, align 64
567 %162 = load volatile <16 x i32>, ptr @vecpreds, align 64
568 %163 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %162, i32 -1)
569 %164 = load volatile <16 x i32>, ptr @vectors, align 64
570 %165 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
571 %166 = call <64 x i1> @llvm.hexagon.V6.vgtb.or(<64 x i1> %163, <16 x i32> %164, <16 x i32> %165)
572 %167 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %166, i32 -1)
573 store volatile <16 x i32> %167, ptr @Q6VecPredResult, align 64
574 %168 = load volatile <16 x i32>, ptr @vecpreds, align 64
575 %169 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %168, i32 -1)
576 %170 = load volatile <16 x i32>, ptr @vectors, align 64
577 %171 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
578 %172 = call <64 x i1> @llvm.hexagon.V6.vgth.or(<64 x i1> %169, <16 x i32> %170, <16 x i32> %171)
579 %173 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %172, i32 -1)
580 store volatile <16 x i32> %173, ptr @Q6VecPredResult, align 64
581 %174 = load volatile <16 x i32>, ptr @vecpreds, align 64
582 %175 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %174, i32 -1)
583 %176 = load volatile <16 x i32>, ptr @vectors, align 64
584 %177 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
585 %178 = call <64 x i1> @llvm.hexagon.V6.vgtub.or(<64 x i1> %175, <16 x i32> %176, <16 x i32> %177)
586 %179 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %178, i32 -1)
587 store volatile <16 x i32> %179, ptr @Q6VecPredResult, align 64
588 %180 = load volatile <16 x i32>, ptr @vecpreds, align 64
589 %181 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %180, i32 -1)
590 %182 = load volatile <16 x i32>, ptr @vectors, align 64
591 %183 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
592 %184 = call <64 x i1> @llvm.hexagon.V6.vgtuh.or(<64 x i1> %181, <16 x i32> %182, <16 x i32> %183)
593 %185 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %184, i32 -1)
594 store volatile <16 x i32> %185, ptr @Q6VecPredResult, align 64
595 %186 = load volatile <16 x i32>, ptr @vecpreds, align 64
596 %187 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %186, i32 -1)
597 %188 = load volatile <16 x i32>, ptr @vectors, align 64
598 %189 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
599 %190 = call <64 x i1> @llvm.hexagon.V6.vgtuw.or(<64 x i1> %187, <16 x i32> %188, <16 x i32> %189)
600 %191 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %190, i32 -1)
601 store volatile <16 x i32> %191, ptr @Q6VecPredResult, align 64
602 %192 = load volatile <16 x i32>, ptr @vecpreds, align 64
603 %193 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %192, i32 -1)
604 %194 = load volatile <16 x i32>, ptr @vectors, align 64
605 %195 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
606 %196 = call <64 x i1> @llvm.hexagon.V6.vgtw.or(<64 x i1> %193, <16 x i32> %194, <16 x i32> %195)
607 %197 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %196, i32 -1)
608 store volatile <16 x i32> %197, ptr @Q6VecPredResult, align 64
609 %198 = load volatile <16 x i32>, ptr @vecpreds, align 64
610 %199 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %198, i32 -1)
611 %200 = load volatile <16 x i32>, ptr @vectors, align 64
612 %201 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
613 %202 = call <64 x i1> @llvm.hexagon.V6.vgtb.xor(<64 x i1> %199, <16 x i32> %200, <16 x i32> %201)
614 %203 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %202, i32 -1)
615 store volatile <16 x i32> %203, ptr @Q6VecPredResult, align 64
616 %204 = load volatile <16 x i32>, ptr @vecpreds, align 64
617 %205 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %204, i32 -1)
618 %206 = load volatile <16 x i32>, ptr @vectors, align 64
619 %207 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
620 %208 = call <64 x i1> @llvm.hexagon.V6.vgth.xor(<64 x i1> %205, <16 x i32> %206, <16 x i32> %207)
621 %209 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %208, i32 -1)
622 store volatile <16 x i32> %209, ptr @Q6VecPredResult, align 64
623 %210 = load volatile <16 x i32>, ptr @vecpreds, align 64
624 %211 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %210, i32 -1)
625 %212 = load volatile <16 x i32>, ptr @vectors, align 64
626 %213 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
627 %214 = call <64 x i1> @llvm.hexagon.V6.vgtub.xor(<64 x i1> %211, <16 x i32> %212, <16 x i32> %213)
628 %215 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %214, i32 -1)
629 store volatile <16 x i32> %215, ptr @Q6VecPredResult, align 64
630 %216 = load volatile <16 x i32>, ptr @vecpreds, align 64
631 %217 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %216, i32 -1)
632 %218 = load volatile <16 x i32>, ptr @vectors, align 64
633 %219 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
634 %220 = call <64 x i1> @llvm.hexagon.V6.vgtuh.xor(<64 x i1> %217, <16 x i32> %218, <16 x i32> %219)
635 %221 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %220, i32 -1)
636 store volatile <16 x i32> %221, ptr @Q6VecPredResult, align 64
637 %222 = load volatile <16 x i32>, ptr @vecpreds, align 64
638 %223 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %222, i32 -1)
639 %224 = load volatile <16 x i32>, ptr @vectors, align 64
640 %225 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
641 %226 = call <64 x i1> @llvm.hexagon.V6.vgtuw.xor(<64 x i1> %223, <16 x i32> %224, <16 x i32> %225)
642 %227 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %226, i32 -1)
643 store volatile <16 x i32> %227, ptr @Q6VecPredResult, align 64
644 %228 = load volatile <16 x i32>, ptr @vecpreds, align 64
645 %229 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %228, i32 -1)
646 %230 = load volatile <16 x i32>, ptr @vectors, align 64
647 %231 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
648 %232 = call <64 x i1> @llvm.hexagon.V6.vgtw.xor(<64 x i1> %229, <16 x i32> %230, <16 x i32> %231)
649 %233 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %232, i32 -1)
650 store volatile <16 x i32> %233, ptr @Q6VecPredResult, align 64
651 %234 = call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 1)
652 %235 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %234, i32 -1)
653 store volatile <16 x i32> %235, ptr @Q6VecPredResult, align 64
654 %236 = load volatile <16 x i32>, ptr @vecpreds, align 64
655 %237 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %236, i32 -1)
656 %238 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
657 %239 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %238, i32 -1)
658 %240 = call <64 x i1> @llvm.hexagon.V6.pred.xor(<64 x i1> %237, <64 x i1> %239)
659 %241 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %240, i32 -1)
660 store volatile <16 x i32> %241, ptr @Q6VecPredResult, align 64
661 %242 = load volatile <16 x i32>, ptr @vectors, align 64
662 %243 = call <16 x i32> @llvm.hexagon.V6.vassign(<16 x i32> %242)
663 store volatile <16 x i32> %243, ptr @VectorResult, align 64
664 %244 = load volatile <32 x i32>, ptr @vector_pairs, align 128
665 %245 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %244)
666 store volatile <16 x i32> %245, ptr @VectorResult, align 64
667 %246 = load volatile <32 x i32>, ptr @vector_pairs, align 128
668 %247 = call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %246)
669 store volatile <16 x i32> %247, ptr @VectorResult, align 64
670 %248 = load volatile <16 x i32>, ptr @vectors, align 64
671 %249 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
672 %250 = call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %248, <16 x i32> %249, i32 1)
673 store volatile <16 x i32> %250, ptr @VectorResult, align 64
674 %251 = load volatile <16 x i32>, ptr @vectors, align 64
675 %252 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
676 %253 = call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %251, <16 x i32> %252, i32 -1)
677 store volatile <16 x i32> %253, ptr @VectorResult, align 64
678 %254 = load volatile <16 x i32>, ptr @vecpreds, align 64
679 %255 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %254, i32 -1)
680 %256 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %255, i32 -1)
681 store volatile <16 x i32> %256, ptr @VectorResult, align 64
682 %257 = load volatile <16 x i32>, ptr @vectors, align 64
683 %258 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
684 %259 = call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %257, <16 x i32> %258)
685 store volatile <16 x i32> %259, ptr @VectorResult, align 64
686 %260 = load volatile <16 x i32>, ptr @vectors, align 64
687 %261 = load volatile <16 x i32>, ptr @vecpreds, align 64
688 %262 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %261, i32 -1)
689 %263 = call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %260, <64 x i1> %262, i32 -1)
690 store volatile <16 x i32> %263, ptr @VectorResult, align 64
691 %264 = load volatile <16 x i32>, ptr @vectors, align 64
692 %265 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
693 %266 = call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %264, <16 x i32> %265)
694 store volatile <16 x i32> %266, ptr @VectorResult, align 64
695 %267 = load volatile <16 x i32>, ptr @vectors, align 64
696 %268 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
697 %269 = call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %267, <16 x i32> %268, i32 1)
698 store volatile <16 x i32> %269, ptr @VectorResult, align 64
699 %270 = load volatile <16 x i32>, ptr @vectors, align 64
700 %271 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
701 %272 = call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %270, <16 x i32> %271, i32 -1)
702 store volatile <16 x i32> %272, ptr @VectorResult, align 64
703 %273 = load volatile <16 x i32>, ptr @vecpreds, align 64
704 %274 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %273, i32 -1)
705 %275 = load volatile <16 x i32>, ptr @vectors, align 64
706 %276 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
707 %277 = call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %274, <16 x i32> %275, <16 x i32> %276)
708 store volatile <16 x i32> %277, ptr @VectorResult, align 64
709 %278 = load volatile <16 x i32>, ptr @vectors, align 64
710 %279 = call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %278)
711 store volatile <16 x i32> %279, ptr @VectorResult, align 64
712 %280 = load volatile <16 x i32>, ptr @vectors, align 64
713 %281 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
714 %282 = call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %280, <16 x i32> %281)
715 store volatile <16 x i32> %282, ptr @VectorResult, align 64
716 %283 = load volatile <16 x i32>, ptr @vectors, align 64
717 %284 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
718 %285 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %283, <16 x i32> %284)
719 store volatile <16 x i32> %285, ptr @VectorResult, align 64
720 %286 = load volatile <16 x i32>, ptr @vectors, align 64
721 %287 = call <16 x i32> @llvm.hexagon.V6.vror(<16 x i32> %286, i32 -1)
722 store volatile <16 x i32> %287, ptr @VectorResult, align 64
723 %288 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
724 store volatile <16 x i32> %288, ptr @VectorResult, align 64
725 %289 = load volatile <16 x i32>, ptr @vectors, align 64
726 %290 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
727 %291 = call <16 x i32> @llvm.hexagon.V6.vxor(<16 x i32> %289, <16 x i32> %290)
728 store volatile <16 x i32> %291, ptr @VectorResult, align 64
729 %292 = call <16 x i32> @llvm.hexagon.V6.vd0()
730 store volatile <16 x i32> %292, ptr @VectorResult, align 64
731 %293 = load volatile <16 x i32>, ptr @vecpreds, align 64
732 %294 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %293, i32 -1)
733 %295 = load volatile <16 x i32>, ptr @vectors, align 64
734 %296 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
735 %297 = call <16 x i32> @llvm.hexagon.V6.vaddbq(<64 x i1> %294, <16 x i32> %295, <16 x i32> %296)
736 store volatile <16 x i32> %297, ptr @VectorResult, align 64
737 %298 = load volatile <16 x i32>, ptr @vecpreds, align 64
738 %299 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %298, i32 -1)
739 %300 = load volatile <16 x i32>, ptr @vectors, align 64
740 %301 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
741 %302 = call <16 x i32> @llvm.hexagon.V6.vaddbnq(<64 x i1> %299, <16 x i32> %300, <16 x i32> %301)
742 store volatile <16 x i32> %302, ptr @VectorResult, align 64
743 %303 = load volatile <16 x i32>, ptr @vecpreds, align 64
744 %304 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %303, i32 -1)
745 %305 = load volatile <16 x i32>, ptr @vectors, align 64
746 %306 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
747 %307 = call <16 x i32> @llvm.hexagon.V6.vsubbq(<64 x i1> %304, <16 x i32> %305, <16 x i32> %306)
748 store volatile <16 x i32> %307, ptr @VectorResult, align 64
749 %308 = load volatile <16 x i32>, ptr @vecpreds, align 64
750 %309 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %308, i32 -1)
751 %310 = load volatile <16 x i32>, ptr @vectors, align 64
752 %311 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
753 %312 = call <16 x i32> @llvm.hexagon.V6.vsubbnq(<64 x i1> %309, <16 x i32> %310, <16 x i32> %311)
754 store volatile <16 x i32> %312, ptr @VectorResult, align 64
755 %313 = load volatile <16 x i32>, ptr @vectors, align 64
756 %314 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
757 %315 = call <16 x i32> @llvm.hexagon.V6.vaddb(<16 x i32> %313, <16 x i32> %314)
758 store volatile <16 x i32> %315, ptr @VectorResult, align 64
759 %316 = load volatile <16 x i32>, ptr @vectors, align 64
760 %317 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
761 %318 = call <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32> %316, <16 x i32> %317, i32 -1)
762 store volatile <16 x i32> %318, ptr @VectorResult, align 64
763 %319 = load volatile <16 x i32>, ptr @vectors, align 64
764 %320 = call <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32> %319)
765 store volatile <16 x i32> %320, ptr @VectorResult, align 64
766 %321 = load volatile <16 x i32>, ptr @vectors, align 64
767 %322 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
768 %323 = call <16 x i32> @llvm.hexagon.V6.vdealb4w(<16 x i32> %321, <16 x i32> %322)
769 store volatile <16 x i32> %323, ptr @VectorResult, align 64
770 %324 = load volatile <16 x i32>, ptr @vectors, align 64
771 %325 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
772 %326 = call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %324, <16 x i32> %325, i32 -1)
773 store volatile <16 x i32> %326, ptr @VectorResult, align 64
774 %327 = load volatile <16 x i32>, ptr @vectors, align 64
775 %328 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
776 %329 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
777 %330 = call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %327, <16 x i32> %328, <16 x i32> %329, i32 -1)
778 store volatile <16 x i32> %330, ptr @VectorResult, align 64
779 %331 = load volatile <16 x i32>, ptr @vectors, align 64
780 %332 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
781 %333 = call <16 x i32> @llvm.hexagon.V6.vnavgub(<16 x i32> %331, <16 x i32> %332)
782 store volatile <16 x i32> %333, ptr @VectorResult, align 64
783 %334 = load volatile <16 x i32>, ptr @vectors, align 64
784 %335 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
785 %336 = call <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32> %334, <16 x i32> %335)
786 store volatile <16 x i32> %336, ptr @VectorResult, align 64
787 %337 = load volatile <16 x i32>, ptr @vectors, align 64
788 %338 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
789 %339 = call <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32> %337, <16 x i32> %338)
790 store volatile <16 x i32> %339, ptr @VectorResult, align 64
791 %340 = load volatile <16 x i32>, ptr @vectors, align 64
792 %341 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
793 %342 = call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %340, <16 x i32> %341)
794 store volatile <16 x i32> %342, ptr @VectorResult, align 64
795 %343 = load volatile <16 x i32>, ptr @vectors, align 64
796 %344 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
797 %345 = call <16 x i32> @llvm.hexagon.V6.vroundhb(<16 x i32> %343, <16 x i32> %344)
798 store volatile <16 x i32> %345, ptr @VectorResult, align 64
799 %346 = load volatile <16 x i32>, ptr @vectors, align 64
800 %347 = call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %346)
801 store volatile <16 x i32> %347, ptr @VectorResult, align 64
802 %348 = load volatile <16 x i32>, ptr @vectors, align 64
803 %349 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
804 %350 = call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %348, <16 x i32> %349)
805 store volatile <16 x i32> %350, ptr @VectorResult, align 64
806 %351 = load volatile <16 x i32>, ptr @vectors, align 64
807 %352 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
808 %353 = call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %351, <16 x i32> %352)
809 store volatile <16 x i32> %353, ptr @VectorResult, align 64
810 %354 = load volatile <16 x i32>, ptr @vectors, align 64
811 %355 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
812 %356 = call <16 x i32> @llvm.hexagon.V6.vsubb(<16 x i32> %354, <16 x i32> %355)
813 store volatile <16 x i32> %356, ptr @VectorResult, align 64
814 %357 = load volatile <16 x i32>, ptr @vecpreds, align 64
815 %358 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %357, i32 -1)
816 %359 = load volatile <16 x i32>, ptr @vectors, align 64
817 %360 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
818 %361 = call <16 x i32> @llvm.hexagon.V6.vaddhq(<64 x i1> %358, <16 x i32> %359, <16 x i32> %360)
819 store volatile <16 x i32> %361, ptr @VectorResult, align 64
820 %362 = load volatile <16 x i32>, ptr @vecpreds, align 64
821 %363 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %362, i32 -1)
822 %364 = load volatile <16 x i32>, ptr @vectors, align 64
823 %365 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
824 %366 = call <16 x i32> @llvm.hexagon.V6.vaddhnq(<64 x i1> %363, <16 x i32> %364, <16 x i32> %365)
825 store volatile <16 x i32> %366, ptr @VectorResult, align 64
826 %367 = load volatile <16 x i32>, ptr @vecpreds, align 64
827 %368 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %367, i32 -1)
828 %369 = load volatile <16 x i32>, ptr @vectors, align 64
829 %370 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
830 %371 = call <16 x i32> @llvm.hexagon.V6.vsubhq(<64 x i1> %368, <16 x i32> %369, <16 x i32> %370)
831 store volatile <16 x i32> %371, ptr @VectorResult, align 64
832 %372 = load volatile <16 x i32>, ptr @vecpreds, align 64
833 %373 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %372, i32 -1)
834 %374 = load volatile <16 x i32>, ptr @vectors, align 64
835 %375 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
836 %376 = call <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1> %373, <16 x i32> %374, <16 x i32> %375)
837 store volatile <16 x i32> %376, ptr @VectorResult, align 64
838 %377 = load volatile <16 x i32>, ptr @vectors, align 64
839 %378 = call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %377)
840 store volatile <16 x i32> %378, ptr @VectorResult, align 64
841 %379 = load volatile <16 x i32>, ptr @vectors, align 64
842 %380 = call <16 x i32> @llvm.hexagon.V6.vabsh.sat(<16 x i32> %379)
843 store volatile <16 x i32> %380, ptr @VectorResult, align 64
844 %381 = load volatile <16 x i32>, ptr @vectors, align 64
845 %382 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
846 %383 = call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %381, <16 x i32> %382)
847 store volatile <16 x i32> %383, ptr @VectorResult, align 64
848 %384 = load volatile <16 x i32>, ptr @vectors, align 64
849 %385 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
850 %386 = call <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32> %384, <16 x i32> %385)
851 store volatile <16 x i32> %386, ptr @VectorResult, align 64
852 %387 = load volatile <16 x i32>, ptr @vectors, align 64
853 %388 = call <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32> %387, i32 -1)
854 store volatile <16 x i32> %388, ptr @VectorResult, align 64
855 %389 = load volatile <16 x i32>, ptr @vectors, align 64
856 %390 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
857 %391 = call <16 x i32> @llvm.hexagon.V6.vaslhv(<16 x i32> %389, <16 x i32> %390)
858 store volatile <16 x i32> %391, ptr @VectorResult, align 64
859 %392 = load volatile <16 x i32>, ptr @vectors, align 64
860 %393 = call <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32> %392, i32 -1)
861 store volatile <16 x i32> %393, ptr @VectorResult, align 64
862 %394 = load volatile <16 x i32>, ptr @vectors, align 64
863 %395 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
864 %396 = call <16 x i32> @llvm.hexagon.V6.vasrhv(<16 x i32> %394, <16 x i32> %395)
865 store volatile <16 x i32> %396, ptr @VectorResult, align 64
866 %397 = load volatile <16 x i32>, ptr @vectors, align 64
867 %398 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
868 %399 = call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %397, <16 x i32> %398, i32 -1)
869 store volatile <16 x i32> %399, ptr @VectorResult, align 64
870 %400 = load volatile <16 x i32>, ptr @vectors, align 64
871 %401 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
872 %402 = call <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32> %400, <16 x i32> %401, i32 -1)
873 store volatile <16 x i32> %402, ptr @VectorResult, align 64
874 %403 = load volatile <16 x i32>, ptr @vectors, align 64
875 %404 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
876 %405 = call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %403, <16 x i32> %404, i32 -1)
877 store volatile <16 x i32> %405, ptr @VectorResult, align 64
878 %406 = load volatile <16 x i32>, ptr @vectors, align 64
879 %407 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
880 %408 = call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %406, <16 x i32> %407)
881 store volatile <16 x i32> %408, ptr @VectorResult, align 64
882 %409 = load volatile <16 x i32>, ptr @vectors, align 64
883 %410 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
884 %411 = call <16 x i32> @llvm.hexagon.V6.vavghrnd(<16 x i32> %409, <16 x i32> %410)
885 store volatile <16 x i32> %411, ptr @VectorResult, align 64
886 %412 = load volatile <16 x i32>, ptr @vectors, align 64
887 %413 = call <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32> %412)
888 store volatile <16 x i32> %413, ptr @VectorResult, align 64
889 %414 = load volatile <16 x i32>, ptr @vectors, align 64
890 %415 = call <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32> %414, i32 -1)
891 store volatile <16 x i32> %415, ptr @VectorResult, align 64
892 %416 = load volatile <16 x i32>, ptr @vectors, align 64
893 %417 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
894 %418 = call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %416, <16 x i32> %417, i32 -1)
895 store volatile <16 x i32> %418, ptr @VectorResult, align 64
896 %419 = load volatile <16 x i32>, ptr @vectors, align 64
897 %420 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
898 %421 = call <16 x i32> @llvm.hexagon.V6.vlsrhv(<16 x i32> %419, <16 x i32> %420)
899 store volatile <16 x i32> %421, ptr @VectorResult, align 64
900 %422 = load volatile <16 x i32>, ptr @vectors, align 64
901 %423 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
902 %424 = call <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32> %422, <16 x i32> %423)
903 store volatile <16 x i32> %424, ptr @VectorResult, align 64
904 %425 = load volatile <16 x i32>, ptr @vectors, align 64
905 %426 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
906 %427 = call <16 x i32> @llvm.hexagon.V6.vminh(<16 x i32> %425, <16 x i32> %426)
907 store volatile <16 x i32> %427, ptr @VectorResult, align 64
908 %428 = load volatile <16 x i32>, ptr @vectors, align 64
909 %429 = call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %428, i32 -1)
910 store volatile <16 x i32> %429, ptr @VectorResult, align 64
911 %430 = load volatile <16 x i32>, ptr @vectors, align 64
912 %431 = call <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32> %430, i32 -1)
913 store volatile <16 x i32> %431, ptr @VectorResult, align 64
914 %432 = load volatile <16 x i32>, ptr @vectors, align 64
915 %433 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
916 %434 = call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %432, <16 x i32> %433)
917 store volatile <16 x i32> %434, ptr @VectorResult, align 64
918 %435 = load volatile <16 x i32>, ptr @vectors, align 64
919 %436 = call <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32> %435, i32 -1)
920 store volatile <16 x i32> %436, ptr @VectorResult, align 64
921 %437 = load volatile <16 x i32>, ptr @vectors, align 64
922 %438 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
923 %439 = call <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32> %437, <16 x i32> %438)
924 store volatile <16 x i32> %439, ptr @VectorResult, align 64
925 %440 = load volatile <16 x i32>, ptr @vectors, align 64
926 %441 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
927 %442 = call <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32> %440, <16 x i32> %441, i32 -1)
928 store volatile <16 x i32> %442, ptr @VectorResult, align 64
929 %443 = load volatile <16 x i32>, ptr @vectors, align 64
930 %444 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
931 %445 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
932 %446 = call <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32> %443, <16 x i32> %444, <16 x i32> %445)
933 store volatile <16 x i32> %446, ptr @VectorResult, align 64
934 %447 = load volatile <16 x i32>, ptr @vectors, align 64
935 %448 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
936 %449 = call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %447, <16 x i32> %448)
937 store volatile <16 x i32> %449, ptr @VectorResult, align 64
938 %450 = load volatile <16 x i32>, ptr @vectors, align 64
939 %451 = call <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32> %450)
940 store volatile <16 x i32> %451, ptr @VectorResult, align 64
941 %452 = load volatile <16 x i32>, ptr @vectors, align 64
942 %453 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
943 %454 = call <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32> %452, <16 x i32> %453)
944 store volatile <16 x i32> %454, ptr @VectorResult, align 64
945 %455 = load volatile <16 x i32>, ptr @vectors, align 64
946 %456 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
947 %457 = call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %455, <16 x i32> %456)
948 store volatile <16 x i32> %457, ptr @VectorResult, align 64
949 %458 = load volatile <16 x i32>, ptr @vectors, align 64
950 %459 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
951 %460 = call <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32> %458, <16 x i32> %459)
952 store volatile <16 x i32> %460, ptr @VectorResult, align 64
953 %461 = load volatile <16 x i32>, ptr @vectors, align 64
954 %462 = call <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32> %461)
955 store volatile <16 x i32> %462, ptr @VectorResult, align 64
956 %463 = load volatile <16 x i32>, ptr @vectors, align 64
957 %464 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
958 %465 = call <16 x i32> @llvm.hexagon.V6.vroundwh(<16 x i32> %463, <16 x i32> %464)
959 store volatile <16 x i32> %465, ptr @VectorResult, align 64
960 %466 = load volatile <16 x i32>, ptr @vectors, align 64
961 %467 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
962 %468 = call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %466, <16 x i32> %467)
963 store volatile <16 x i32> %468, ptr @VectorResult, align 64
964 %469 = load volatile <16 x i32>, ptr @vectors, align 64
965 %470 = call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %469)
966 store volatile <16 x i32> %470, ptr @VectorResult, align 64
967 %471 = load volatile <16 x i32>, ptr @vectors, align 64
968 %472 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
969 %473 = call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %471, <16 x i32> %472)
970 store volatile <16 x i32> %473, ptr @VectorResult, align 64
971 %474 = load volatile <16 x i32>, ptr @vectors, align 64
972 %475 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
973 %476 = call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %474, <16 x i32> %475)
974 store volatile <16 x i32> %476, ptr @VectorResult, align 64
975 %477 = load volatile <16 x i32>, ptr @vectors, align 64
976 %478 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
977 %479 = call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %477, <16 x i32> %478)
978 store volatile <16 x i32> %479, ptr @VectorResult, align 64
979 %480 = load volatile <16 x i32>, ptr @vectors, align 64
980 %481 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
981 %482 = call <16 x i32> @llvm.hexagon.V6.vsubhsat(<16 x i32> %480, <16 x i32> %481)
982 store volatile <16 x i32> %482, ptr @VectorResult, align 64
983 %483 = load volatile <16 x i32>, ptr @vectors, align 64
984 %484 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
985 %485 = call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %483, <16 x i32> %484)
986 store volatile <16 x i32> %485, ptr @VectorResult, align 64
987 %486 = load volatile <16 x i32>, ptr @vectors, align 64
988 %487 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
989 %488 = call <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32> %486, <16 x i32> %487)
990 store volatile <16 x i32> %488, ptr @VectorResult, align 64
991 %489 = load volatile <16 x i32>, ptr @vectors, align 64
992 %490 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
993 %491 = call <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32> %489, <16 x i32> %490, i32 -1)
994 store volatile <16 x i32> %491, ptr @VectorResult, align 64
995 %492 = load volatile <16 x i32>, ptr @vectors, align 64
996 %493 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
997 %494 = call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %492, <16 x i32> %493, i32 -1)
998 store volatile <16 x i32> %494, ptr @VectorResult, align 64
999 %495 = load volatile <16 x i32>, ptr @vectors, align 64
1000 %496 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1001 %497 = call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %495, <16 x i32> %496)
1002 store volatile <16 x i32> %497, ptr @VectorResult, align 64
1003 %498 = load volatile <16 x i32>, ptr @vectors, align 64
1004 %499 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1005 %500 = call <16 x i32> @llvm.hexagon.V6.vavgubrnd(<16 x i32> %498, <16 x i32> %499)
1006 store volatile <16 x i32> %500, ptr @VectorResult, align 64
1007 %501 = load volatile <16 x i32>, ptr @vectors, align 64
1008 %502 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1009 %503 = call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %501, <16 x i32> %502)
1010 store volatile <16 x i32> %503, ptr @VectorResult, align 64
1011 %504 = load volatile <16 x i32>, ptr @vectors, align 64
1012 %505 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1013 %506 = call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %504, <16 x i32> %505)
1014 store volatile <16 x i32> %506, ptr @VectorResult, align 64
1015 %507 = load volatile <16 x i32>, ptr @vectors, align 64
1016 %508 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1017 %509 = call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %507, <16 x i32> %508)
1018 store volatile <16 x i32> %509, ptr @VectorResult, align 64
1019 %510 = load volatile <16 x i32>, ptr @vectors, align 64
1020 %511 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1021 %512 = call <16 x i32> @llvm.hexagon.V6.vroundhub(<16 x i32> %510, <16 x i32> %511)
1022 store volatile <16 x i32> %512, ptr @VectorResult, align 64
1023 %513 = load volatile <16 x i32>, ptr @vectors, align 64
1024 %514 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1025 %515 = call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %513, <16 x i32> %514)
1026 store volatile <16 x i32> %515, ptr @VectorResult, align 64
1027 %516 = load volatile <16 x i32>, ptr @vectors, align 64
1028 %517 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1029 %518 = call <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32> %516, <16 x i32> %517)
1030 store volatile <16 x i32> %518, ptr @VectorResult, align 64
1031 %519 = load volatile <16 x i32>, ptr @vectors, align 64
1032 %520 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1033 %521 = call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %519, <16 x i32> %520)
1034 store volatile <16 x i32> %521, ptr @VectorResult, align 64
1035 %522 = load volatile <16 x i32>, ptr @vectors, align 64
1036 %523 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1037 %524 = call <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32> %522, <16 x i32> %523)
1038 store volatile <16 x i32> %524, ptr @VectorResult, align 64
1039 %525 = load volatile <16 x i32>, ptr @vectors, align 64
1040 %526 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1041 %527 = call <16 x i32> @llvm.hexagon.V6.vadduhsat(<16 x i32> %525, <16 x i32> %526)
1042 store volatile <16 x i32> %527, ptr @VectorResult, align 64
1043 %528 = load volatile <16 x i32>, ptr @vectors, align 64
1044 %529 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1045 %530 = call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %528, <16 x i32> %529, i32 -1)
1046 store volatile <16 x i32> %530, ptr @VectorResult, align 64
1047 %531 = load volatile <16 x i32>, ptr @vectors, align 64
1048 %532 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1049 %533 = call <16 x i32> @llvm.hexagon.V6.vavguh(<16 x i32> %531, <16 x i32> %532)
1050 store volatile <16 x i32> %533, ptr @VectorResult, align 64
1051 %534 = load volatile <16 x i32>, ptr @vectors, align 64
1052 %535 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1053 %536 = call <16 x i32> @llvm.hexagon.V6.vavguhrnd(<16 x i32> %534, <16 x i32> %535)
1054 store volatile <16 x i32> %536, ptr @VectorResult, align 64
1055 %537 = load volatile <16 x i32>, ptr @vectors, align 64
1056 %538 = call <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32> %537)
1057 store volatile <16 x i32> %538, ptr @VectorResult, align 64
1058 %539 = load volatile <16 x i32>, ptr @vectors, align 64
1059 %540 = call <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32> %539, i32 -1)
1060 store volatile <16 x i32> %540, ptr @VectorResult, align 64
1061 %541 = load volatile <16 x i32>, ptr @vectors, align 64
1062 %542 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1063 %543 = call <16 x i32> @llvm.hexagon.V6.vmaxuh(<16 x i32> %541, <16 x i32> %542)
1064 store volatile <16 x i32> %543, ptr @VectorResult, align 64
1065 %544 = load volatile <16 x i32>, ptr @vectors, align 64
1066 %545 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1067 %546 = call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %544, <16 x i32> %545)
1068 store volatile <16 x i32> %546, ptr @VectorResult, align 64
1069 %547 = load volatile <16 x i32>, ptr @vectors, align 64
1070 %548 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1071 %549 = call <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32> %547, <16 x i32> %548)
1072 store volatile <16 x i32> %549, ptr @VectorResult, align 64
1073 %550 = load volatile <16 x i32>, ptr @vectors, align 64
1074 %551 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1075 %552 = call <16 x i32> @llvm.hexagon.V6.vroundwuh(<16 x i32> %550, <16 x i32> %551)
1076 store volatile <16 x i32> %552, ptr @VectorResult, align 64
1077 %553 = load volatile <16 x i32>, ptr @vectors, align 64
1078 %554 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1079 %555 = call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %553, <16 x i32> %554)
1080 store volatile <16 x i32> %555, ptr @VectorResult, align 64
1081 %556 = load volatile <16 x i32>, ptr @vectors, align 64
1082 %557 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1083 %558 = call <16 x i32> @llvm.hexagon.V6.vabsdiffw(<16 x i32> %556, <16 x i32> %557)
1084 store volatile <16 x i32> %558, ptr @VectorResult, align 64
1085 %559 = load volatile <16 x i32>, ptr @vectors, align 64
1086 %560 = call <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32> %559)
1087 store volatile <16 x i32> %560, ptr @VectorResult, align 64
1088 %561 = load volatile <16 x i32>, ptr @vectors, align 64
1089 %562 = call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %561, i32 -1)
1090 store volatile <16 x i32> %562, ptr @VectorResult, align 64
1091 %563 = load volatile <16 x i32>, ptr @vectors, align 64
1092 %564 = call <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32> %563, i32 -1)
1093 store volatile <16 x i32> %564, ptr @VectorResult, align 64
1094 %565 = load volatile <16 x i32>, ptr @vectors, align 64
1095 %566 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1096 %567 = call <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32> %565, <16 x i32> %566)
1097 store volatile <16 x i32> %567, ptr @VectorResult, align 64
1098 %568 = load volatile <16 x i32>, ptr @vectors, align 64
1099 %569 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1100 %570 = call <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32> %568, <16 x i32> %569, i32 -1)
1101 store volatile <16 x i32> %570, ptr @VectorResult, align 64
1102 %571 = load volatile <16 x i32>, ptr @vectors, align 64
1103 %572 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1104 %573 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1105 %574 = call <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32> %571, <16 x i32> %572, <16 x i32> %573)
1106 store volatile <16 x i32> %574, ptr @VectorResult, align 64
1107 %575 = load volatile <16 x i32>, ptr @vecpreds, align 64
1108 %576 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %575, i32 -1)
1109 %577 = load volatile <16 x i32>, ptr @vectors, align 64
1110 %578 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1111 %579 = call <16 x i32> @llvm.hexagon.V6.vaddwq(<64 x i1> %576, <16 x i32> %577, <16 x i32> %578)
1112 store volatile <16 x i32> %579, ptr @VectorResult, align 64
1113 %580 = load volatile <16 x i32>, ptr @vecpreds, align 64
1114 %581 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %580, i32 -1)
1115 %582 = load volatile <16 x i32>, ptr @vectors, align 64
1116 %583 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1117 %584 = call <16 x i32> @llvm.hexagon.V6.vaddwnq(<64 x i1> %581, <16 x i32> %582, <16 x i32> %583)
1118 store volatile <16 x i32> %584, ptr @VectorResult, align 64
1119 %585 = load volatile <16 x i32>, ptr @vecpreds, align 64
1120 %586 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %585, i32 -1)
1121 %587 = load volatile <16 x i32>, ptr @vectors, align 64
1122 %588 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1123 %589 = call <16 x i32> @llvm.hexagon.V6.vsubwq(<64 x i1> %586, <16 x i32> %587, <16 x i32> %588)
1124 store volatile <16 x i32> %589, ptr @VectorResult, align 64
1125 %590 = load volatile <16 x i32>, ptr @vecpreds, align 64
1126 %591 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %590, i32 -1)
1127 %592 = load volatile <16 x i32>, ptr @vectors, align 64
1128 %593 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1129 %594 = call <16 x i32> @llvm.hexagon.V6.vsubwnq(<64 x i1> %591, <16 x i32> %592, <16 x i32> %593)
1130 store volatile <16 x i32> %594, ptr @VectorResult, align 64
1131 %595 = load volatile <16 x i32>, ptr @vectors, align 64
1132 %596 = call <16 x i32> @llvm.hexagon.V6.vabsw(<16 x i32> %595)
1133 store volatile <16 x i32> %596, ptr @VectorResult, align 64
1134 %597 = load volatile <16 x i32>, ptr @vectors, align 64
1135 %598 = call <16 x i32> @llvm.hexagon.V6.vabsw.sat(<16 x i32> %597)
1136 store volatile <16 x i32> %598, ptr @VectorResult, align 64
1137 %599 = load volatile <16 x i32>, ptr @vectors, align 64
1138 %600 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1139 %601 = call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %599, <16 x i32> %600)
1140 store volatile <16 x i32> %601, ptr @VectorResult, align 64
1141 %602 = load volatile <16 x i32>, ptr @vectors, align 64
1142 %603 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1143 %604 = call <16 x i32> @llvm.hexagon.V6.vaddwsat(<16 x i32> %602, <16 x i32> %603)
1144 store volatile <16 x i32> %604, ptr @VectorResult, align 64
1145 %605 = load volatile <16 x i32>, ptr @vectors, align 64
1146 %606 = call <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32> %605, i32 -1)
1147 store volatile <16 x i32> %606, ptr @VectorResult, align 64
1148 %607 = load volatile <16 x i32>, ptr @vectors, align 64
1149 %608 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1150 %609 = call <16 x i32> @llvm.hexagon.V6.vaslwv(<16 x i32> %607, <16 x i32> %608)
1151 store volatile <16 x i32> %609, ptr @VectorResult, align 64
1152 %610 = load volatile <16 x i32>, ptr @vectors, align 64
1153 %611 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1154 %612 = call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %610, <16 x i32> %611, i32 -1)
1155 store volatile <16 x i32> %612, ptr @VectorResult, align 64
1156 %613 = load volatile <16 x i32>, ptr @vectors, align 64
1157 %614 = call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %613, i32 -1)
1158 store volatile <16 x i32> %614, ptr @VectorResult, align 64
1159 %615 = load volatile <16 x i32>, ptr @vectors, align 64
1160 %616 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1161 %617 = call <16 x i32> @llvm.hexagon.V6.vasrwv(<16 x i32> %615, <16 x i32> %616)
1162 store volatile <16 x i32> %617, ptr @VectorResult, align 64
1163 %618 = load volatile <16 x i32>, ptr @vectors, align 64
1164 %619 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1165 %620 = call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %618, <16 x i32> %619, i32 -1)
1166 store volatile <16 x i32> %620, ptr @VectorResult, align 64
1167 %621 = load volatile <16 x i32>, ptr @vectors, align 64
1168 %622 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1169 %623 = call <16 x i32> @llvm.hexagon.V6.vavgw(<16 x i32> %621, <16 x i32> %622)
1170 store volatile <16 x i32> %623, ptr @VectorResult, align 64
1171 %624 = load volatile <16 x i32>, ptr @vectors, align 64
1172 %625 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1173 %626 = call <16 x i32> @llvm.hexagon.V6.vavgwrnd(<16 x i32> %624, <16 x i32> %625)
1174 store volatile <16 x i32> %626, ptr @VectorResult, align 64
1175 %627 = load volatile <16 x i32>, ptr @vectors, align 64
1176 %628 = call <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32> %627, i32 -1)
1177 store volatile <16 x i32> %628, ptr @VectorResult, align 64
1178 %629 = load volatile <16 x i32>, ptr @vectors, align 64
1179 %630 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32> %629, i32 -1)
1180 store volatile <16 x i32> %630, ptr @VectorResult, align 64
1181 %631 = load volatile <16 x i32>, ptr @vectors, align 64
1182 %632 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32> %631, i32 -1)
1183 store volatile <16 x i32> %632, ptr @VectorResult, align 64
1184 %633 = load volatile <16 x i32>, ptr @vectors, align 64
1185 %634 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1186 %635 = call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %633, <16 x i32> %634)
1187 store volatile <16 x i32> %635, ptr @VectorResult, align 64
1188 %636 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1189 %637 = call <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32> %636, i32 -1)
1190 store volatile <16 x i32> %637, ptr @VectorResult, align 64
1191 %638 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1192 %639 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32> %638, i32 -1)
1193 store volatile <16 x i32> %639, ptr @VectorResult, align 64
1194 %640 = load volatile <16 x i32>, ptr @vectors, align 64
1195 %641 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1196 %642 = call <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32> %640, <16 x i32> %641, i32 -1)
1197 store volatile <16 x i32> %642, ptr @VectorResult, align 64
1198 %643 = load volatile <16 x i32>, ptr @vectors, align 64
1199 %644 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1200 %645 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32> %643, <16 x i32> %644, i32 -1)
1201 store volatile <16 x i32> %645, ptr @VectorResult, align 64
1202 %646 = load volatile <16 x i32>, ptr @vectors, align 64
1203 %647 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1204 %648 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32> %646, <16 x i32> %647, i32 -1)
1205 store volatile <16 x i32> %648, ptr @VectorResult, align 64
1206 %649 = load volatile <16 x i32>, ptr @vectors, align 64
1207 %650 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1208 %651 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1209 %652 = call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %649, <16 x i32> %650, <16 x i32> %651)
1210 store volatile <16 x i32> %652, ptr @VectorResult, align 64
1211 %653 = load volatile <16 x i32>, ptr @vectors, align 64
1212 %654 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1213 %655 = call <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32> %653, <32 x i32> %654, i32 -1)
1214 store volatile <16 x i32> %655, ptr @VectorResult, align 64
1215 %656 = load volatile <16 x i32>, ptr @vectors, align 64
1216 %657 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1217 %658 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32> %656, <32 x i32> %657, i32 -1)
1218 store volatile <16 x i32> %658, ptr @VectorResult, align 64
1219 %659 = load volatile <16 x i32>, ptr @vectors, align 64
1220 %660 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %659, i32 -1)
1221 store volatile <16 x i32> %660, ptr @VectorResult, align 64
1222 %661 = load volatile <16 x i32>, ptr @vectors, align 64
1223 %662 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %661, i32 0)
1224 store volatile <16 x i32> %662, ptr @VectorResult, align 64
1225 %663 = load volatile <16 x i32>, ptr @vectors, align 64
1226 %664 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %663, i32 1)
1227 store volatile <16 x i32> %664, ptr @VectorResult, align 64
1228 %665 = load volatile <16 x i32>, ptr @vectors, align 64
1229 %666 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1230 %667 = call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %665, <16 x i32> %666)
1231 store volatile <16 x i32> %667, ptr @VectorResult, align 64
1232 %668 = load volatile <16 x i32>, ptr @vectors, align 64
1233 %669 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1234 %670 = call <16 x i32> @llvm.hexagon.V6.vmaxw(<16 x i32> %668, <16 x i32> %669)
1235 store volatile <16 x i32> %670, ptr @VectorResult, align 64
1236 %671 = load volatile <16 x i32>, ptr @vectors, align 64
1237 %672 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1238 %673 = call <16 x i32> @llvm.hexagon.V6.vminw(<16 x i32> %671, <16 x i32> %672)
1239 store volatile <16 x i32> %673, ptr @VectorResult, align 64
1240 %674 = load volatile <16 x i32>, ptr @vectors, align 64
1241 %675 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1242 %676 = call <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32> %674, <16 x i32> %675)
1243 store volatile <16 x i32> %676, ptr @VectorResult, align 64
1244 %677 = load volatile <16 x i32>, ptr @vectors, align 64
1245 %678 = call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %677, i32 -1)
1246 store volatile <16 x i32> %678, ptr @VectorResult, align 64
1247 %679 = load volatile <16 x i32>, ptr @vectors, align 64
1248 %680 = call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> %679, i32 -1)
1249 store volatile <16 x i32> %680, ptr @VectorResult, align 64
1250 %681 = load volatile <16 x i32>, ptr @vectors, align 64
1251 %682 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1252 %683 = call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %681, <16 x i32> %682, i32 -1)
1253 store volatile <16 x i32> %683, ptr @VectorResult, align 64
1254 %684 = load volatile <16 x i32>, ptr @vectors, align 64
1255 %685 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1256 %686 = call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %684, <16 x i32> %685, i32 -1)
1257 store volatile <16 x i32> %686, ptr @VectorResult, align 64
1258 %687 = load volatile <16 x i32>, ptr @vectors, align 64
1259 %688 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1260 %689 = call <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32> %687, <16 x i32> %688)
1261 store volatile <16 x i32> %689, ptr @VectorResult, align 64
1262 %690 = load volatile <16 x i32>, ptr @vectors, align 64
1263 %691 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1264 %692 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1265 %693 = call <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32> %690, <16 x i32> %691, <16 x i32> %692)
1266 store volatile <16 x i32> %693, ptr @VectorResult, align 64
1267 %694 = load volatile <16 x i32>, ptr @vectors, align 64
1268 %695 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1269 %696 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1270 %697 = call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> %694, <16 x i32> %695, <16 x i32> %696)
1271 store volatile <16 x i32> %697, ptr @VectorResult, align 64
1272 %698 = load volatile <16 x i32>, ptr @vectors, align 64
1273 %699 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1274 %700 = call <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32> %698, <16 x i32> %699)
1275 store volatile <16 x i32> %700, ptr @VectorResult, align 64
1276 %701 = load volatile <16 x i32>, ptr @vectors, align 64
1277 %702 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1278 %703 = call <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32> %701, <16 x i32> %702)
1279 store volatile <16 x i32> %703, ptr @VectorResult, align 64
1280 %704 = load volatile <16 x i32>, ptr @vectors, align 64
1281 %705 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1282 %706 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32> %704, <16 x i32> %705)
1283 store volatile <16 x i32> %706, ptr @VectorResult, align 64
1284 %707 = load volatile <16 x i32>, ptr @vectors, align 64
1285 %708 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1286 %709 = call <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32> %707, <16 x i32> %708)
1287 store volatile <16 x i32> %709, ptr @VectorResult, align 64
1288 %710 = load volatile <16 x i32>, ptr @vectors, align 64
1289 %711 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1290 %712 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1291 %713 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32> %710, <16 x i32> %711, <16 x i32> %712)
1292 store volatile <16 x i32> %713, ptr @VectorResult, align 64
1293 %714 = load volatile <16 x i32>, ptr @vectors, align 64
1294 %715 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1295 %716 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1296 %717 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32> %714, <16 x i32> %715, <16 x i32> %716)
1297 store volatile <16 x i32> %717, ptr @VectorResult, align 64
1298 %718 = load volatile <16 x i32>, ptr @vectors, align 64
1299 %719 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1300 %720 = call <16 x i32> @llvm.hexagon.V6.vnavgw(<16 x i32> %718, <16 x i32> %719)
1301 store volatile <16 x i32> %720, ptr @VectorResult, align 64
1302 %721 = load volatile <16 x i32>, ptr @vectors, align 64
1303 %722 = call <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32> %721)
1304 store volatile <16 x i32> %722, ptr @VectorResult, align 64
1305 %723 = load volatile <16 x i32>, ptr @vectors, align 64
1306 %724 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1307 %725 = call <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32> %723, <16 x i32> %724)
1308 store volatile <16 x i32> %725, ptr @VectorResult, align 64
1309 %726 = load volatile <16 x i32>, ptr @vectors, align 64
1310 %727 = call <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32> %726, i32 -1)
1311 store volatile <16 x i32> %727, ptr @VectorResult, align 64
1312 %728 = load volatile <16 x i32>, ptr @vectors, align 64
1313 %729 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1314 %730 = call <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32> %728, <16 x i32> %729)
1315 store volatile <16 x i32> %730, ptr @VectorResult, align 64
1316 %731 = load volatile <16 x i32>, ptr @vectors, align 64
1317 %732 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1318 %733 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1319 %734 = call <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32> %731, <16 x i32> %732, <16 x i32> %733)
1320 store volatile <16 x i32> %734, ptr @VectorResult, align 64
1321 %735 = load volatile <16 x i32>, ptr @vectors, align 64
1322 %736 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1323 %737 = call <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32> %735, <16 x i32> %736, i32 -1)
1324 store volatile <16 x i32> %737, ptr @VectorResult, align 64
1325 %738 = load volatile <16 x i32>, ptr @vectors, align 64
1326 %739 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1327 %740 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
1328 %741 = call <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32> %738, <16 x i32> %739, <16 x i32> %740)
1329 store volatile <16 x i32> %741, ptr @VectorResult, align 64
1330 %742 = load volatile <16 x i32>, ptr @vectors, align 64
1331 %743 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1332 %744 = call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> %742, <16 x i32> %743)
1333 store volatile <16 x i32> %744, ptr @VectorResult, align 64
1334 %745 = load volatile <16 x i32>, ptr @vectors, align 64
1335 %746 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1336 %747 = call <16 x i32> @llvm.hexagon.V6.vsubwsat(<16 x i32> %745, <16 x i32> %746)
1337 store volatile <16 x i32> %747, ptr @VectorResult, align 64
1338 %748 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1339 %749 = call <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32> %748)
1340 store volatile <32 x i32> %749, ptr @VectorPairResult, align 128
1341 %750 = load volatile <16 x i32>, ptr @vectors, align 64
1342 %751 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1343 %752 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %750, <16 x i32> %751)
1344 store volatile <32 x i32> %752, ptr @VectorPairResult, align 128
1345 %753 = load volatile <16 x i32>, ptr @vectors, align 64
1346 %754 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1347 %755 = call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %753, <16 x i32> %754, i32 -1)
1348 store volatile <32 x i32> %755, ptr @VectorPairResult, align 128
1349 %756 = load volatile <16 x i32>, ptr @vectors, align 64
1350 %757 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1351 %758 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %756, <16 x i32> %757, i32 -1)
1352 store volatile <32 x i32> %758, ptr @VectorPairResult, align 128
1353 %759 = load volatile <16 x i32>, ptr @vectors, align 64
1354 %760 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1355 %761 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %759, <16 x i32> %760, i32 0)
1356 store volatile <32 x i32> %761, ptr @VectorPairResult, align 128
1357 %762 = load volatile <16 x i32>, ptr @vectors, align 64
1358 %763 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1359 %764 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %762, <16 x i32> %763, i32 1)
1360 store volatile <32 x i32> %764, ptr @VectorPairResult, align 128
1361 %765 = load volatile <16 x i32>, ptr @vecpreds, align 64
1362 %766 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %765, i32 -1)
1363 %767 = load volatile <16 x i32>, ptr @vectors, align 64
1364 %768 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1365 %769 = call <32 x i32> @llvm.hexagon.V6.vswap(<64 x i1> %766, <16 x i32> %767, <16 x i32> %768)
1366 store volatile <32 x i32> %769, ptr @VectorPairResult, align 128
1367 %770 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1368 %771 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1369 %772 = call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> %770, <32 x i32> %771)
1370 store volatile <32 x i32> %772, ptr @VectorPairResult, align 128
1371 %773 = load volatile <16 x i32>, ptr @vectors, align 64
1372 %774 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1373 %775 = call <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32> %773, <16 x i32> %774)
1374 store volatile <32 x i32> %775, ptr @VectorPairResult, align 128
1375 %776 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1376 %777 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1377 %778 = call <32 x i32> @llvm.hexagon.V6.vsubb.dv(<32 x i32> %776, <32 x i32> %777)
1378 store volatile <32 x i32> %778, ptr @VectorPairResult, align 128
1379 %779 = load volatile <16 x i32>, ptr @vectors, align 64
1380 %780 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1381 %781 = call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %779, <16 x i32> %780)
1382 store volatile <32 x i32> %781, ptr @VectorPairResult, align 128
1383 %782 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1384 %783 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1385 %784 = call <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32> %782, <32 x i32> %783)
1386 store volatile <32 x i32> %784, ptr @VectorPairResult, align 128
1387 %785 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1388 %786 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1389 %787 = call <32 x i32> @llvm.hexagon.V6.vaddhsat.dv(<32 x i32> %785, <32 x i32> %786)
1390 store volatile <32 x i32> %787, ptr @VectorPairResult, align 128
1391 %788 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1392 %789 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %788, i32 -1)
1393 store volatile <32 x i32> %789, ptr @VectorPairResult, align 128
1394 %790 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1395 %791 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1396 %792 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %790, <32 x i32> %791, i32 -1)
1397 store volatile <32 x i32> %792, ptr @VectorPairResult, align 128
1398 %793 = load volatile <16 x i32>, ptr @vectors, align 64
1399 %794 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1400 %795 = call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32> %793, <16 x i32> %794, i32 -1)
1401 store volatile <32 x i32> %795, ptr @VectorPairResult, align 128
1402 %796 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1403 %797 = load volatile <16 x i32>, ptr @vectors, align 64
1404 %798 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1405 %799 = call <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32> %796, <16 x i32> %797, <16 x i32> %798, i32 -1)
1406 store volatile <32 x i32> %799, ptr @VectorPairResult, align 128
1407 %800 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1408 %801 = call <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32> %800, i32 -1)
1409 store volatile <32 x i32> %801, ptr @VectorPairResult, align 128
1410 %802 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1411 %803 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1412 %804 = call <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32> %802, <32 x i32> %803)
1413 store volatile <32 x i32> %804, ptr @VectorPairResult, align 128
1414 %805 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1415 %806 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1416 %807 = call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %805, <32 x i32> %806)
1417 store volatile <32 x i32> %807, ptr @VectorPairResult, align 128
1418 %808 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1419 %809 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1420 %810 = call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %808, <32 x i32> %809, i32 -1)
1421 store volatile <32 x i32> %810, ptr @VectorPairResult, align 128
1422 %811 = load volatile <16 x i32>, ptr @vectors, align 64
1423 %812 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1424 %813 = call <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32> %811, <16 x i32> %812)
1425 store volatile <32 x i32> %813, ptr @VectorPairResult, align 128
1426 %814 = load volatile <16 x i32>, ptr @vectors, align 64
1427 %815 = call <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32> %814, i32 -1)
1428 store volatile <32 x i32> %815, ptr @VectorPairResult, align 128
1429 %816 = load volatile <16 x i32>, ptr @vectors, align 64
1430 %817 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1431 %818 = call <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32> %816, <16 x i32> %817)
1432 store volatile <32 x i32> %818, ptr @VectorPairResult, align 128
1433 %819 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1434 %820 = load volatile <16 x i32>, ptr @vectors, align 64
1435 %821 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1436 %822 = call <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32> %819, <16 x i32> %820, <16 x i32> %821)
1437 store volatile <32 x i32> %822, ptr @VectorPairResult, align 128
1438 %823 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1439 %824 = load volatile <16 x i32>, ptr @vectors, align 64
1440 %825 = call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %823, <16 x i32> %824, i32 -1)
1441 store volatile <32 x i32> %825, ptr @VectorPairResult, align 128
1442 %826 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1443 %827 = load volatile <16 x i32>, ptr @vectors, align 64
1444 %828 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1445 %829 = call <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32> %826, <16 x i32> %827, <16 x i32> %828)
1446 store volatile <32 x i32> %829, ptr @VectorPairResult, align 128
1447 %830 = load volatile <16 x i32>, ptr @vectors, align 64
1448 %831 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1449 %832 = call <32 x i32> @llvm.hexagon.V6.vshufoeh(<16 x i32> %830, <16 x i32> %831)
1450 store volatile <32 x i32> %832, ptr @VectorPairResult, align 128
1451 %833 = load volatile <16 x i32>, ptr @vectors, align 64
1452 %834 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1453 %835 = call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %833, <16 x i32> %834)
1454 store volatile <32 x i32> %835, ptr @VectorPairResult, align 128
1455 %836 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1456 %837 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1457 %838 = call <32 x i32> @llvm.hexagon.V6.vsubh.dv(<32 x i32> %836, <32 x i32> %837)
1458 store volatile <32 x i32> %838, ptr @VectorPairResult, align 128
1459 %839 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1460 %840 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1461 %841 = call <32 x i32> @llvm.hexagon.V6.vsubhsat.dv(<32 x i32> %839, <32 x i32> %840)
1462 store volatile <32 x i32> %841, ptr @VectorPairResult, align 128
1463 %842 = load volatile <16 x i32>, ptr @vectors, align 64
1464 %843 = call <32 x i32> @llvm.hexagon.V6.vsb(<16 x i32> %842)
1465 store volatile <32 x i32> %843, ptr @VectorPairResult, align 128
1466 %844 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1467 %845 = call <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32> %844, i32 -1)
1468 store volatile <32 x i32> %845, ptr @VectorPairResult, align 128
1469 %846 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1470 %847 = call <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32> %846, i32 -1)
1471 store volatile <32 x i32> %847, ptr @VectorPairResult, align 128
1472 %848 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1473 %849 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1474 %850 = call <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32> %848, <32 x i32> %849, i32 -1)
1475 store volatile <32 x i32> %850, ptr @VectorPairResult, align 128
1476 %851 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1477 %852 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1478 %853 = call <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32> %851, <32 x i32> %852, i32 -1)
1479 store volatile <32 x i32> %853, ptr @VectorPairResult, align 128
1480 %854 = load volatile <16 x i32>, ptr @vectors, align 64
1481 %855 = call <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32> %854)
1482 store volatile <32 x i32> %855, ptr @VectorPairResult, align 128
1483 %856 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1484 %857 = load volatile <16 x i32>, ptr @vectors, align 64
1485 %858 = call <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32> %856, <16 x i32> %857)
1486 store volatile <32 x i32> %858, ptr @VectorPairResult, align 128
1487 %859 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1488 %860 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1489 %861 = call <32 x i32> @llvm.hexagon.V6.vaddubsat.dv(<32 x i32> %859, <32 x i32> %860)
1490 store volatile <32 x i32> %861, ptr @VectorPairResult, align 128
1491 %862 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1492 %863 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1493 %864 = call <32 x i32> @llvm.hexagon.V6.vsububsat.dv(<32 x i32> %862, <32 x i32> %863)
1494 store volatile <32 x i32> %864, ptr @VectorPairResult, align 128
1495 %865 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1496 %866 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1497 %867 = call <32 x i32> @llvm.hexagon.V6.vadduhsat.dv(<32 x i32> %865, <32 x i32> %866)
1498 store volatile <32 x i32> %867, ptr @VectorPairResult, align 128
1499 %868 = load volatile <16 x i32>, ptr @vectors, align 64
1500 %869 = call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %868, i32 -1)
1501 store volatile <32 x i32> %869, ptr @VectorPairResult, align 128
1502 %870 = load volatile <16 x i32>, ptr @vectors, align 64
1503 %871 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1504 %872 = call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %870, <16 x i32> %871)
1505 store volatile <32 x i32> %872, ptr @VectorPairResult, align 128
1506 %873 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1507 %874 = load volatile <16 x i32>, ptr @vectors, align 64
1508 %875 = call <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32> %873, <16 x i32> %874, i32 -1)
1509 store volatile <32 x i32> %875, ptr @VectorPairResult, align 128
1510 %876 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1511 %877 = load volatile <16 x i32>, ptr @vectors, align 64
1512 %878 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1513 %879 = call <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32> %876, <16 x i32> %877, <16 x i32> %878)
1514 store volatile <32 x i32> %879, ptr @VectorPairResult, align 128
1515 %880 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1516 %881 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1517 %882 = call <32 x i32> @llvm.hexagon.V6.vsubuhsat.dv(<32 x i32> %880, <32 x i32> %881)
1518 store volatile <32 x i32> %882, ptr @VectorPairResult, align 128
1519 %883 = load volatile <16 x i32>, ptr @vectors, align 64
1520 %884 = call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %883)
1521 store volatile <32 x i32> %884, ptr @VectorPairResult, align 128
1522 %885 = load volatile <16 x i32>, ptr @vectors, align 64
1523 %886 = call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %885)
1524 store volatile <32 x i32> %886, ptr @VectorPairResult, align 128
1525 %887 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1526 %888 = call <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32> %887, i32 -1)
1527 store volatile <32 x i32> %888, ptr @VectorPairResult, align 128
1528 %889 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1529 %890 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1530 %891 = call <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32> %889, <32 x i32> %890, i32 -1)
1531 store volatile <32 x i32> %891, ptr @VectorPairResult, align 128
1532 %892 = load volatile <16 x i32>, ptr @vectors, align 64
1533 %893 = call <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32> %892, i32 -1)
1534 store volatile <32 x i32> %893, ptr @VectorPairResult, align 128
1535 %894 = load volatile <16 x i32>, ptr @vectors, align 64
1536 %895 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1537 %896 = call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %894, <16 x i32> %895)
1538 store volatile <32 x i32> %896, ptr @VectorPairResult, align 128
1539 %897 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1540 %898 = load volatile <16 x i32>, ptr @vectors, align 64
1541 %899 = call <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32> %897, <16 x i32> %898, i32 -1)
1542 store volatile <32 x i32> %899, ptr @VectorPairResult, align 128
1543 %900 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1544 %901 = load volatile <16 x i32>, ptr @vectors, align 64
1545 %902 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1546 %903 = call <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32> %900, <16 x i32> %901, <16 x i32> %902)
1547 store volatile <32 x i32> %903, ptr @VectorPairResult, align 128
1548 %904 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1549 %905 = call <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32> %904, i32 -1, i32 0)
1550 store volatile <32 x i32> %905, ptr @VectorPairResult, align 128
1551 %906 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1552 %907 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1553 %908 = call <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32> %906, <32 x i32> %907, i32 -1, i32 0)
1554 store volatile <32 x i32> %908, ptr @VectorPairResult, align 128
1555 %909 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1556 %910 = call <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32> %909, i32 -1, i32 0)
1557 store volatile <32 x i32> %910, ptr @VectorPairResult, align 128
1558 %911 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1559 %912 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1560 %913 = call <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32> %911, <32 x i32> %912, i32 -1, i32 0)
1561 store volatile <32 x i32> %913, ptr @VectorPairResult, align 128
1562 %914 = load volatile <16 x i32>, ptr @vectors, align 64
1563 %915 = call <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32> %914)
1564 store volatile <32 x i32> %915, ptr @VectorPairResult, align 128
1565 %916 = load volatile <16 x i32>, ptr @vectors, align 64
1566 %917 = call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %916)
1567 store volatile <32 x i32> %917, ptr @VectorPairResult, align 128
1568 %918 = load volatile <16 x i32>, ptr @vectors, align 64
1569 %919 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1570 %920 = call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %918, <16 x i32> %919)
1571 store volatile <32 x i32> %920, ptr @VectorPairResult, align 128
1572 %921 = load volatile <16 x i32>, ptr @vectors, align 64
1573 %922 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1574 %923 = call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %921, <16 x i32> %922)
1575 store volatile <32 x i32> %923, ptr @VectorPairResult, align 128
1576 %924 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1577 %925 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1578 %926 = call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %924, <32 x i32> %925)
1579 store volatile <32 x i32> %926, ptr @VectorPairResult, align 128
1580 %927 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1581 %928 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1582 %929 = call <32 x i32> @llvm.hexagon.V6.vaddwsat.dv(<32 x i32> %927, <32 x i32> %928)
1583 store volatile <32 x i32> %929, ptr @VectorPairResult, align 128
1584 %930 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1585 %931 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32> %930, i32 -1)
1586 store volatile <32 x i32> %931, ptr @VectorPairResult, align 128
1587 %932 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1588 %933 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1589 %934 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32> %932, <32 x i32> %933, i32 -1)
1590 store volatile <32 x i32> %934, ptr @VectorPairResult, align 128
1591 %935 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1592 %936 = call <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32> %935, i32 -1)
1593 store volatile <32 x i32> %936, ptr @VectorPairResult, align 128
1594 %937 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1595 %938 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1596 %939 = call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %937, <32 x i32> %938, i32 -1)
1597 store volatile <32 x i32> %939, ptr @VectorPairResult, align 128
1598 %940 = load volatile <16 x i32>, ptr @vectors, align 64
1599 %941 = call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %940, i32 -1)
1600 store volatile <32 x i32> %941, ptr @VectorPairResult, align 128
1601 %942 = load volatile <16 x i32>, ptr @vectors, align 64
1602 %943 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1603 %944 = call <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32> %942, <16 x i32> %943)
1604 store volatile <32 x i32> %944, ptr @VectorPairResult, align 128
1605 %945 = load volatile <16 x i32>, ptr @vectors, align 64
1606 %946 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1607 %947 = call <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32> %945, <16 x i32> %946)
1608 store volatile <32 x i32> %947, ptr @VectorPairResult, align 128
1609 %948 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1610 %949 = load volatile <16 x i32>, ptr @vectors, align 64
1611 %950 = call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %948, <16 x i32> %949, i32 -1)
1612 store volatile <32 x i32> %950, ptr @VectorPairResult, align 128
1613 %951 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1614 %952 = load volatile <16 x i32>, ptr @vectors, align 64
1615 %953 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1616 %954 = call <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32> %951, <16 x i32> %952, <16 x i32> %953)
1617 store volatile <32 x i32> %954, ptr @VectorPairResult, align 128
1618 %955 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1619 %956 = load volatile <16 x i32>, ptr @vectors, align 64
1620 %957 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1621 %958 = call <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32> %955, <16 x i32> %956, <16 x i32> %957)
1622 store volatile <32 x i32> %958, ptr @VectorPairResult, align 128
1623 %959 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1624 %960 = call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %959, i32 -1, i32 0)
1625 store volatile <32 x i32> %960, ptr @VectorPairResult, align 128
1626 %961 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1627 %962 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1628 %963 = call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %961, <32 x i32> %962, i32 -1, i32 0)
1629 store volatile <32 x i32> %963, ptr @VectorPairResult, align 128
1630 %964 = load volatile <16 x i32>, ptr @vectors, align 64
1631 %965 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1632 %966 = call <32 x i32> @llvm.hexagon.V6.vsubhw(<16 x i32> %964, <16 x i32> %965)
1633 store volatile <32 x i32> %966, ptr @VectorPairResult, align 128
1634 %967 = load volatile <16 x i32>, ptr @vectors, align 64
1635 %968 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
1636 %969 = call <32 x i32> @llvm.hexagon.V6.vsubuhw(<16 x i32> %967, <16 x i32> %968)
1637 store volatile <32 x i32> %969, ptr @VectorPairResult, align 128
1638 %970 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1639 %971 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1640 %972 = call <32 x i32> @llvm.hexagon.V6.vsubw.dv(<32 x i32> %970, <32 x i32> %971)
1641 store volatile <32 x i32> %972, ptr @VectorPairResult, align 128
1642 %973 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1643 %974 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1644 %975 = call <32 x i32> @llvm.hexagon.V6.vsubwsat.dv(<32 x i32> %973, <32 x i32> %974)
1645 store volatile <32 x i32> %975, ptr @VectorPairResult, align 128
1646 %976 = load volatile <16 x i32>, ptr @vectors, align 64
1647 %977 = call <32 x i32> @llvm.hexagon.V6.vsh(<16 x i32> %976)
1648 store volatile <32 x i32> %977, ptr @VectorPairResult, align 128
1649 %978 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1650 %979 = call <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32> %978, i32 -1)
1651 store volatile <32 x i32> %979, ptr @VectorPairResult, align 128
1652 %980 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1653 %981 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
1654 %982 = call <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32> %980, <32 x i32> %981, i32 -1)
1655 store volatile <32 x i32> %982, ptr @VectorPairResult, align 128
1656 %983 = load volatile <16 x i32>, ptr @vectors, align 64
1657 %984 = call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %983)
1658 store volatile <32 x i32> %984, ptr @VectorPairResult, align 128
1659 %985 = load volatile <32 x i32>, ptr @vector_pairs, align 128
1660 %986 = load volatile <16 x i32>, ptr @vectors, align 64
1661 %987 = call <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32> %985, <16 x i32> %986)
1662 store volatile <32 x i32> %987, ptr @VectorPairResult, align 128
1666 ; Function Attrs: nounwind readnone
1667 declare <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1>, <64 x i1>) #1
1669 ; Function Attrs: nounwind readnone
1670 declare <64 x i1> @llvm.hexagon.V6.pred.and.n(<64 x i1>, <64 x i1>) #1
1672 ; Function Attrs: nounwind readnone
1673 declare <64 x i1> @llvm.hexagon.V6.pred.not(<64 x i1>) #1
1675 ; Function Attrs: nounwind readnone
1676 declare <64 x i1> @llvm.hexagon.V6.pred.or(<64 x i1>, <64 x i1>) #1
1678 ; Function Attrs: nounwind readnone
1679 declare <64 x i1> @llvm.hexagon.V6.pred.or.n(<64 x i1>, <64 x i1>) #1
1681 ; Function Attrs: nounwind readnone
1682 declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #1
1684 ; Function Attrs: nounwind readnone
1685 declare <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1>, <16 x i32>, i32) #1
1687 ; Function Attrs: nounwind readnone
1688 declare <64 x i1> @llvm.hexagon.V6.veqb(<16 x i32>, <16 x i32>) #1
1690 ; Function Attrs: nounwind readnone
1691 declare <64 x i1> @llvm.hexagon.V6.veqh(<16 x i32>, <16 x i32>) #1
1693 ; Function Attrs: nounwind readnone
1694 declare <64 x i1> @llvm.hexagon.V6.veqw(<16 x i32>, <16 x i32>) #1
1696 ; Function Attrs: nounwind readnone
1697 declare <64 x i1> @llvm.hexagon.V6.veqb.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1699 ; Function Attrs: nounwind readnone
1700 declare <64 x i1> @llvm.hexagon.V6.veqh.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1702 ; Function Attrs: nounwind readnone
1703 declare <64 x i1> @llvm.hexagon.V6.veqw.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1705 ; Function Attrs: nounwind readnone
1706 declare <64 x i1> @llvm.hexagon.V6.veqb.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1708 ; Function Attrs: nounwind readnone
1709 declare <64 x i1> @llvm.hexagon.V6.veqh.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1711 ; Function Attrs: nounwind readnone
1712 declare <64 x i1> @llvm.hexagon.V6.veqw.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1714 ; Function Attrs: nounwind readnone
1715 declare <64 x i1> @llvm.hexagon.V6.veqb.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1717 ; Function Attrs: nounwind readnone
1718 declare <64 x i1> @llvm.hexagon.V6.veqh.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1720 ; Function Attrs: nounwind readnone
1721 declare <64 x i1> @llvm.hexagon.V6.veqw.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1723 ; Function Attrs: nounwind readnone
1724 declare <64 x i1> @llvm.hexagon.V6.vgtb(<16 x i32>, <16 x i32>) #1
1726 ; Function Attrs: nounwind readnone
1727 declare <64 x i1> @llvm.hexagon.V6.vgth(<16 x i32>, <16 x i32>) #1
1729 ; Function Attrs: nounwind readnone
1730 declare <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #1
1732 ; Function Attrs: nounwind readnone
1733 declare <64 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32>, <16 x i32>) #1
1735 ; Function Attrs: nounwind readnone
1736 declare <64 x i1> @llvm.hexagon.V6.vgtuw(<16 x i32>, <16 x i32>) #1
1738 ; Function Attrs: nounwind readnone
1739 declare <64 x i1> @llvm.hexagon.V6.vgtw(<16 x i32>, <16 x i32>) #1
1741 ; Function Attrs: nounwind readnone
1742 declare <64 x i1> @llvm.hexagon.V6.vgtb.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1744 ; Function Attrs: nounwind readnone
1745 declare <64 x i1> @llvm.hexagon.V6.vgth.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1747 ; Function Attrs: nounwind readnone
1748 declare <64 x i1> @llvm.hexagon.V6.vgtub.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1750 ; Function Attrs: nounwind readnone
1751 declare <64 x i1> @llvm.hexagon.V6.vgtuh.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1753 ; Function Attrs: nounwind readnone
1754 declare <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1756 ; Function Attrs: nounwind readnone
1757 declare <64 x i1> @llvm.hexagon.V6.vgtw.and(<64 x i1>, <16 x i32>, <16 x i32>) #1
1759 ; Function Attrs: nounwind readnone
1760 declare <64 x i1> @llvm.hexagon.V6.vgtb.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1762 ; Function Attrs: nounwind readnone
1763 declare <64 x i1> @llvm.hexagon.V6.vgth.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1765 ; Function Attrs: nounwind readnone
1766 declare <64 x i1> @llvm.hexagon.V6.vgtub.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1768 ; Function Attrs: nounwind readnone
1769 declare <64 x i1> @llvm.hexagon.V6.vgtuh.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1771 ; Function Attrs: nounwind readnone
1772 declare <64 x i1> @llvm.hexagon.V6.vgtuw.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1774 ; Function Attrs: nounwind readnone
1775 declare <64 x i1> @llvm.hexagon.V6.vgtw.or(<64 x i1>, <16 x i32>, <16 x i32>) #1
1777 ; Function Attrs: nounwind readnone
1778 declare <64 x i1> @llvm.hexagon.V6.vgtb.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1780 ; Function Attrs: nounwind readnone
1781 declare <64 x i1> @llvm.hexagon.V6.vgth.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1783 ; Function Attrs: nounwind readnone
1784 declare <64 x i1> @llvm.hexagon.V6.vgtub.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1786 ; Function Attrs: nounwind readnone
1787 declare <64 x i1> @llvm.hexagon.V6.vgtuh.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1789 ; Function Attrs: nounwind readnone
1790 declare <64 x i1> @llvm.hexagon.V6.vgtuw.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1792 ; Function Attrs: nounwind readnone
1793 declare <64 x i1> @llvm.hexagon.V6.vgtw.xor(<64 x i1>, <16 x i32>, <16 x i32>) #1
1795 ; Function Attrs: nounwind readnone
1796 declare <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32) #1
1798 ; Function Attrs: nounwind readnone
1799 declare <64 x i1> @llvm.hexagon.V6.pred.xor(<64 x i1>, <64 x i1>) #1
1801 ; Function Attrs: nounwind readnone
1802 declare <16 x i32> @llvm.hexagon.V6.vassign(<16 x i32>) #1
1804 ; Function Attrs: nounwind readnone
1805 declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
1807 ; Function Attrs: nounwind readnone
1808 declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
1810 ; Function Attrs: nounwind readnone
1811 declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
1813 ; Function Attrs: nounwind readnone
1814 declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
1816 ; Function Attrs: nounwind readnone
1817 declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #1
1819 ; Function Attrs: nounwind readnone
1820 declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #1
1822 ; Function Attrs: nounwind readnone
1823 declare <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32>, <64 x i1>, i32) #1
1825 ; Function Attrs: nounwind readnone
1826 declare <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32>, <16 x i32>) #1
1828 ; Function Attrs: nounwind readnone
1829 declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
1831 ; Function Attrs: nounwind readnone
1832 declare <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32>, <16 x i32>, i32) #1
1834 ; Function Attrs: nounwind readnone
1835 declare <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1>, <16 x i32>, <16 x i32>) #1
1837 ; Function Attrs: nounwind readnone
1838 declare <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32>) #1
1840 ; Function Attrs: nounwind readnone
1841 declare <16 x i32> @llvm.hexagon.V6.vor(<16 x i32>, <16 x i32>) #1
1843 ; Function Attrs: nounwind readnone
1844 declare <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32>, <16 x i32>) #1
1846 ; Function Attrs: nounwind readnone
1847 declare <16 x i32> @llvm.hexagon.V6.vror(<16 x i32>, i32) #1
1849 ; Function Attrs: nounwind readnone
1850 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
1852 ; Function Attrs: nounwind readnone
1853 declare <16 x i32> @llvm.hexagon.V6.vxor(<16 x i32>, <16 x i32>) #1
1855 ; Function Attrs: nounwind readnone
1856 declare <16 x i32> @llvm.hexagon.V6.vd0() #1
1858 ; Function Attrs: nounwind readnone
1859 declare <16 x i32> @llvm.hexagon.V6.vaddbq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1861 ; Function Attrs: nounwind readnone
1862 declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1864 ; Function Attrs: nounwind readnone
1865 declare <16 x i32> @llvm.hexagon.V6.vsubbq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1867 ; Function Attrs: nounwind readnone
1868 declare <16 x i32> @llvm.hexagon.V6.vsubbnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1870 ; Function Attrs: nounwind readnone
1871 declare <16 x i32> @llvm.hexagon.V6.vaddb(<16 x i32>, <16 x i32>) #1
1873 ; Function Attrs: nounwind readnone
1874 declare <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32>, <16 x i32>, i32) #1
1876 ; Function Attrs: nounwind readnone
1877 declare <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32>) #1
1879 ; Function Attrs: nounwind readnone
1880 declare <16 x i32> @llvm.hexagon.V6.vdealb4w(<16 x i32>, <16 x i32>) #1
1882 ; Function Attrs: nounwind readnone
1883 declare <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32>, <16 x i32>, i32) #1
1885 ; Function Attrs: nounwind readnone
1886 declare <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32>, <16 x i32>, <16 x i32>, i32) #1
1888 ; Function Attrs: nounwind readnone
1889 declare <16 x i32> @llvm.hexagon.V6.vnavgub(<16 x i32>, <16 x i32>) #1
1891 ; Function Attrs: nounwind readnone
1892 declare <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32>, <16 x i32>) #1
1894 ; Function Attrs: nounwind readnone
1895 declare <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32>, <16 x i32>) #1
1897 ; Function Attrs: nounwind readnone
1898 declare <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32>, <16 x i32>) #1
1900 ; Function Attrs: nounwind readnone
1901 declare <16 x i32> @llvm.hexagon.V6.vroundhb(<16 x i32>, <16 x i32>) #1
1903 ; Function Attrs: nounwind readnone
1904 declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #1
1906 ; Function Attrs: nounwind readnone
1907 declare <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32>, <16 x i32>) #1
1909 ; Function Attrs: nounwind readnone
1910 declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #1
1912 ; Function Attrs: nounwind readnone
1913 declare <16 x i32> @llvm.hexagon.V6.vsubb(<16 x i32>, <16 x i32>) #1
1915 ; Function Attrs: nounwind readnone
1916 declare <16 x i32> @llvm.hexagon.V6.vaddhq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1918 ; Function Attrs: nounwind readnone
1919 declare <16 x i32> @llvm.hexagon.V6.vaddhnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1921 ; Function Attrs: nounwind readnone
1922 declare <16 x i32> @llvm.hexagon.V6.vsubhq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1924 ; Function Attrs: nounwind readnone
1925 declare <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
1927 ; Function Attrs: nounwind readnone
1928 declare <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32>) #1
1930 ; Function Attrs: nounwind readnone
1931 declare <16 x i32> @llvm.hexagon.V6.vabsh.sat(<16 x i32>) #1
1933 ; Function Attrs: nounwind readnone
1934 declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
1936 ; Function Attrs: nounwind readnone
1937 declare <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32>, <16 x i32>) #1
1939 ; Function Attrs: nounwind readnone
1940 declare <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32>, i32) #1
1942 ; Function Attrs: nounwind readnone
1943 declare <16 x i32> @llvm.hexagon.V6.vaslhv(<16 x i32>, <16 x i32>) #1
1945 ; Function Attrs: nounwind readnone
1946 declare <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32>, i32) #1
1948 ; Function Attrs: nounwind readnone
1949 declare <16 x i32> @llvm.hexagon.V6.vasrhv(<16 x i32>, <16 x i32>) #1
1951 ; Function Attrs: nounwind readnone
1952 declare <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32>, <16 x i32>, i32) #1
1954 ; Function Attrs: nounwind readnone
1955 declare <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32>, <16 x i32>, i32) #1
1957 ; Function Attrs: nounwind readnone
1958 declare <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>, <16 x i32>, i32) #1
1960 ; Function Attrs: nounwind readnone
1961 declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #1
1963 ; Function Attrs: nounwind readnone
1964 declare <16 x i32> @llvm.hexagon.V6.vavghrnd(<16 x i32>, <16 x i32>) #1
1966 ; Function Attrs: nounwind readnone
1967 declare <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32>) #1
1969 ; Function Attrs: nounwind readnone
1970 declare <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32>, i32) #1
1972 ; Function Attrs: nounwind readnone
1973 declare <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32>, <16 x i32>, i32) #1
1975 ; Function Attrs: nounwind readnone
1976 declare <16 x i32> @llvm.hexagon.V6.vlsrhv(<16 x i32>, <16 x i32>) #1
1978 ; Function Attrs: nounwind readnone
1979 declare <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32>, <16 x i32>) #1
1981 ; Function Attrs: nounwind readnone
1982 declare <16 x i32> @llvm.hexagon.V6.vminh(<16 x i32>, <16 x i32>) #1
1984 ; Function Attrs: nounwind readnone
1985 declare <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32>, i32) #1
1987 ; Function Attrs: nounwind readnone
1988 declare <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32>, i32) #1
1990 ; Function Attrs: nounwind readnone
1991 declare <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32>, <16 x i32>) #1
1993 ; Function Attrs: nounwind readnone
1994 declare <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32>, i32) #1
1996 ; Function Attrs: nounwind readnone
1997 declare <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32>, <16 x i32>) #1
1999 ; Function Attrs: nounwind readnone
2000 declare <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32>, <16 x i32>, i32) #1
2002 ; Function Attrs: nounwind readnone
2003 declare <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2005 ; Function Attrs: nounwind readnone
2006 declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #1
2008 ; Function Attrs: nounwind readnone
2009 declare <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32>) #1
2011 ; Function Attrs: nounwind readnone
2012 declare <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32>, <16 x i32>) #1
2014 ; Function Attrs: nounwind readnone
2015 declare <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32>, <16 x i32>) #1
2017 ; Function Attrs: nounwind readnone
2018 declare <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32>, <16 x i32>) #1
2020 ; Function Attrs: nounwind readnone
2021 declare <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32>) #1
2023 ; Function Attrs: nounwind readnone
2024 declare <16 x i32> @llvm.hexagon.V6.vroundwh(<16 x i32>, <16 x i32>) #1
2026 ; Function Attrs: nounwind readnone
2027 declare <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32>, <16 x i32>) #1
2029 ; Function Attrs: nounwind readnone
2030 declare <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>) #1
2032 ; Function Attrs: nounwind readnone
2033 declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #1
2035 ; Function Attrs: nounwind readnone
2036 declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #1
2038 ; Function Attrs: nounwind readnone
2039 declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
2041 ; Function Attrs: nounwind readnone
2042 declare <16 x i32> @llvm.hexagon.V6.vsubhsat(<16 x i32>, <16 x i32>) #1
2044 ; Function Attrs: nounwind readnone
2045 declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1
2047 ; Function Attrs: nounwind readnone
2048 declare <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32>, <16 x i32>) #1
2050 ; Function Attrs: nounwind readnone
2051 declare <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32>, <16 x i32>, i32) #1
2053 ; Function Attrs: nounwind readnone
2054 declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #1
2056 ; Function Attrs: nounwind readnone
2057 declare <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32>, <16 x i32>) #1
2059 ; Function Attrs: nounwind readnone
2060 declare <16 x i32> @llvm.hexagon.V6.vavgubrnd(<16 x i32>, <16 x i32>) #1
2062 ; Function Attrs: nounwind readnone
2063 declare <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32>, <16 x i32>) #1
2065 ; Function Attrs: nounwind readnone
2066 declare <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32>, <16 x i32>) #1
2068 ; Function Attrs: nounwind readnone
2069 declare <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32>, <16 x i32>) #1
2071 ; Function Attrs: nounwind readnone
2072 declare <16 x i32> @llvm.hexagon.V6.vroundhub(<16 x i32>, <16 x i32>) #1
2074 ; Function Attrs: nounwind readnone
2075 declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
2077 ; Function Attrs: nounwind readnone
2078 declare <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32>, <16 x i32>) #1
2080 ; Function Attrs: nounwind readnone
2081 declare <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32>, <16 x i32>) #1
2083 ; Function Attrs: nounwind readnone
2084 declare <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32>, <16 x i32>) #1
2086 ; Function Attrs: nounwind readnone
2087 declare <16 x i32> @llvm.hexagon.V6.vadduhsat(<16 x i32>, <16 x i32>) #1
2089 ; Function Attrs: nounwind readnone
2090 declare <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32>, <16 x i32>, i32) #1
2092 ; Function Attrs: nounwind readnone
2093 declare <16 x i32> @llvm.hexagon.V6.vavguh(<16 x i32>, <16 x i32>) #1
2095 ; Function Attrs: nounwind readnone
2096 declare <16 x i32> @llvm.hexagon.V6.vavguhrnd(<16 x i32>, <16 x i32>) #1
2098 ; Function Attrs: nounwind readnone
2099 declare <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32>) #1
2101 ; Function Attrs: nounwind readnone
2102 declare <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32>, i32) #1
2104 ; Function Attrs: nounwind readnone
2105 declare <16 x i32> @llvm.hexagon.V6.vmaxuh(<16 x i32>, <16 x i32>) #1
2107 ; Function Attrs: nounwind readnone
2108 declare <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32>, <16 x i32>) #1
2110 ; Function Attrs: nounwind readnone
2111 declare <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32>, <16 x i32>) #1
2113 ; Function Attrs: nounwind readnone
2114 declare <16 x i32> @llvm.hexagon.V6.vroundwuh(<16 x i32>, <16 x i32>) #1
2116 ; Function Attrs: nounwind readnone
2117 declare <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32>, <16 x i32>) #1
2119 ; Function Attrs: nounwind readnone
2120 declare <16 x i32> @llvm.hexagon.V6.vabsdiffw(<16 x i32>, <16 x i32>) #1
2122 ; Function Attrs: nounwind readnone
2123 declare <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32>) #1
2125 ; Function Attrs: nounwind readnone
2126 declare <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32>, i32) #1
2128 ; Function Attrs: nounwind readnone
2129 declare <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32>, i32) #1
2131 ; Function Attrs: nounwind readnone
2132 declare <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32>, <16 x i32>) #1
2134 ; Function Attrs: nounwind readnone
2135 declare <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32>, <16 x i32>, i32) #1
2137 ; Function Attrs: nounwind readnone
2138 declare <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2140 ; Function Attrs: nounwind readnone
2141 declare <16 x i32> @llvm.hexagon.V6.vaddwq(<64 x i1>, <16 x i32>, <16 x i32>) #1
2143 ; Function Attrs: nounwind readnone
2144 declare <16 x i32> @llvm.hexagon.V6.vaddwnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
2146 ; Function Attrs: nounwind readnone
2147 declare <16 x i32> @llvm.hexagon.V6.vsubwq(<64 x i1>, <16 x i32>, <16 x i32>) #1
2149 ; Function Attrs: nounwind readnone
2150 declare <16 x i32> @llvm.hexagon.V6.vsubwnq(<64 x i1>, <16 x i32>, <16 x i32>) #1
2152 ; Function Attrs: nounwind readnone
2153 declare <16 x i32> @llvm.hexagon.V6.vabsw(<16 x i32>) #1
2155 ; Function Attrs: nounwind readnone
2156 declare <16 x i32> @llvm.hexagon.V6.vabsw.sat(<16 x i32>) #1
2158 ; Function Attrs: nounwind readnone
2159 declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
2161 ; Function Attrs: nounwind readnone
2162 declare <16 x i32> @llvm.hexagon.V6.vaddwsat(<16 x i32>, <16 x i32>) #1
2164 ; Function Attrs: nounwind readnone
2165 declare <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32>, i32) #1
2167 ; Function Attrs: nounwind readnone
2168 declare <16 x i32> @llvm.hexagon.V6.vaslwv(<16 x i32>, <16 x i32>) #1
2170 ; Function Attrs: nounwind readnone
2171 declare <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32>, <16 x i32>, i32) #1
2173 ; Function Attrs: nounwind readnone
2174 declare <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32>, i32) #1
2176 ; Function Attrs: nounwind readnone
2177 declare <16 x i32> @llvm.hexagon.V6.vasrwv(<16 x i32>, <16 x i32>) #1
2179 ; Function Attrs: nounwind readnone
2180 declare <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32>, <16 x i32>, i32) #1
2182 ; Function Attrs: nounwind readnone
2183 declare <16 x i32> @llvm.hexagon.V6.vavgw(<16 x i32>, <16 x i32>) #1
2185 ; Function Attrs: nounwind readnone
2186 declare <16 x i32> @llvm.hexagon.V6.vavgwrnd(<16 x i32>, <16 x i32>) #1
2188 ; Function Attrs: nounwind readnone
2189 declare <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32>, i32) #1
2191 ; Function Attrs: nounwind readnone
2192 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32>, i32) #1
2194 ; Function Attrs: nounwind readnone
2195 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32>, i32) #1
2197 ; Function Attrs: nounwind readnone
2198 declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32>, <16 x i32>) #1
2200 ; Function Attrs: nounwind readnone
2201 declare <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32>, i32) #1
2203 ; Function Attrs: nounwind readnone
2204 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32>, i32) #1
2206 ; Function Attrs: nounwind readnone
2207 declare <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32>, <16 x i32>, i32) #1
2209 ; Function Attrs: nounwind readnone
2210 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32>, <16 x i32>, i32) #1
2212 ; Function Attrs: nounwind readnone
2213 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32>, <16 x i32>, i32) #1
2215 ; Function Attrs: nounwind readnone
2216 declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2218 ; Function Attrs: nounwind readnone
2219 declare <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32>, <32 x i32>, i32) #1
2221 ; Function Attrs: nounwind readnone
2222 declare <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32>, <32 x i32>, i32) #1
2224 ; Function Attrs: nounwind readnone
2225 declare <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32>, i32) #1
2227 ; Function Attrs: nounwind readnone
2228 declare <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32>, <16 x i32>) #1
2230 ; Function Attrs: nounwind readnone
2231 declare <16 x i32> @llvm.hexagon.V6.vmaxw(<16 x i32>, <16 x i32>) #1
2233 ; Function Attrs: nounwind readnone
2234 declare <16 x i32> @llvm.hexagon.V6.vminw(<16 x i32>, <16 x i32>) #1
2236 ; Function Attrs: nounwind readnone
2237 declare <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32>, <16 x i32>) #1
2239 ; Function Attrs: nounwind readnone
2240 declare <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32>, i32) #1
2242 ; Function Attrs: nounwind readnone
2243 declare <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32>, i32) #1
2245 ; Function Attrs: nounwind readnone
2246 declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32) #1
2248 ; Function Attrs: nounwind readnone
2249 declare <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32>, <16 x i32>, i32) #1
2251 ; Function Attrs: nounwind readnone
2252 declare <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32>, <16 x i32>) #1
2254 ; Function Attrs: nounwind readnone
2255 declare <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2257 ; Function Attrs: nounwind readnone
2258 declare <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2260 ; Function Attrs: nounwind readnone
2261 declare <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32>, <16 x i32>) #1
2263 ; Function Attrs: nounwind readnone
2264 declare <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32>, <16 x i32>) #1
2266 ; Function Attrs: nounwind readnone
2267 declare <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32>, <16 x i32>) #1
2269 ; Function Attrs: nounwind readnone
2270 declare <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32>, <16 x i32>) #1
2272 ; Function Attrs: nounwind readnone
2273 declare <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2275 ; Function Attrs: nounwind readnone
2276 declare <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2278 ; Function Attrs: nounwind readnone
2279 declare <16 x i32> @llvm.hexagon.V6.vnavgw(<16 x i32>, <16 x i32>) #1
2281 ; Function Attrs: nounwind readnone
2282 declare <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32>) #1
2284 ; Function Attrs: nounwind readnone
2285 declare <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32>, <16 x i32>) #1
2287 ; Function Attrs: nounwind readnone
2288 declare <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32>, i32) #1
2290 ; Function Attrs: nounwind readnone
2291 declare <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32>, <16 x i32>) #1
2293 ; Function Attrs: nounwind readnone
2294 declare <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2296 ; Function Attrs: nounwind readnone
2297 declare <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32>, <16 x i32>, i32) #1
2299 ; Function Attrs: nounwind readnone
2300 declare <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
2302 ; Function Attrs: nounwind readnone
2303 declare <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>, <16 x i32>) #1
2305 ; Function Attrs: nounwind readnone
2306 declare <16 x i32> @llvm.hexagon.V6.vsubwsat(<16 x i32>, <16 x i32>) #1
2308 ; Function Attrs: nounwind readnone
2309 declare <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32>) #1
2311 ; Function Attrs: nounwind readnone
2312 declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
2314 ; Function Attrs: nounwind readnone
2315 declare <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32>, <16 x i32>, i32) #1
2317 ; Function Attrs: nounwind readnone
2318 declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #1
2320 ; Function Attrs: nounwind readnone
2321 declare <32 x i32> @llvm.hexagon.V6.vswap(<64 x i1>, <16 x i32>, <16 x i32>) #1
2323 ; Function Attrs: nounwind readnone
2324 declare <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32>, <32 x i32>) #1
2326 ; Function Attrs: nounwind readnone
2327 declare <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32>, <16 x i32>) #1
2329 ; Function Attrs: nounwind readnone
2330 declare <32 x i32> @llvm.hexagon.V6.vsubb.dv(<32 x i32>, <32 x i32>) #1
2332 ; Function Attrs: nounwind readnone
2333 declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #1
2335 ; Function Attrs: nounwind readnone
2336 declare <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32>, <32 x i32>) #1
2338 ; Function Attrs: nounwind readnone
2339 declare <32 x i32> @llvm.hexagon.V6.vaddhsat.dv(<32 x i32>, <32 x i32>) #1
2341 ; Function Attrs: nounwind readnone
2342 declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32>, i32) #1
2344 ; Function Attrs: nounwind readnone
2345 declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #1
2347 ; Function Attrs: nounwind readnone
2348 declare <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32>, <16 x i32>, i32) #1
2350 ; Function Attrs: nounwind readnone
2351 declare <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32>, <16 x i32>, <16 x i32>, i32) #1
2353 ; Function Attrs: nounwind readnone
2354 declare <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32>, i32) #1
2356 ; Function Attrs: nounwind readnone
2357 declare <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32>, <32 x i32>) #1
2359 ; Function Attrs: nounwind readnone
2360 declare <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32>, <32 x i32>) #1
2362 ; Function Attrs: nounwind readnone
2363 declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
2365 ; Function Attrs: nounwind readnone
2366 declare <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32>, <16 x i32>) #1
2368 ; Function Attrs: nounwind readnone
2369 declare <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32>, i32) #1
2371 ; Function Attrs: nounwind readnone
2372 declare <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32>, <16 x i32>) #1
2374 ; Function Attrs: nounwind readnone
2375 declare <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2377 ; Function Attrs: nounwind readnone
2378 declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
2380 ; Function Attrs: nounwind readnone
2381 declare <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2383 ; Function Attrs: nounwind readnone
2384 declare <32 x i32> @llvm.hexagon.V6.vshufoeh(<16 x i32>, <16 x i32>) #1
2386 ; Function Attrs: nounwind readnone
2387 declare <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32>, <16 x i32>) #1
2389 ; Function Attrs: nounwind readnone
2390 declare <32 x i32> @llvm.hexagon.V6.vsubh.dv(<32 x i32>, <32 x i32>) #1
2392 ; Function Attrs: nounwind readnone
2393 declare <32 x i32> @llvm.hexagon.V6.vsubhsat.dv(<32 x i32>, <32 x i32>) #1
2395 ; Function Attrs: nounwind readnone
2396 declare <32 x i32> @llvm.hexagon.V6.vsb(<16 x i32>) #1
2398 ; Function Attrs: nounwind readnone
2399 declare <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32>, i32) #1
2401 ; Function Attrs: nounwind readnone
2402 declare <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32>, i32) #1
2404 ; Function Attrs: nounwind readnone
2405 declare <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32>, <32 x i32>, i32) #1
2407 ; Function Attrs: nounwind readnone
2408 declare <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32>, <32 x i32>, i32) #1
2410 ; Function Attrs: nounwind readnone
2411 declare <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32>) #1
2413 ; Function Attrs: nounwind readnone
2414 declare <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32>, <16 x i32>) #1
2416 ; Function Attrs: nounwind readnone
2417 declare <32 x i32> @llvm.hexagon.V6.vaddubsat.dv(<32 x i32>, <32 x i32>) #1
2419 ; Function Attrs: nounwind readnone
2420 declare <32 x i32> @llvm.hexagon.V6.vsububsat.dv(<32 x i32>, <32 x i32>) #1
2422 ; Function Attrs: nounwind readnone
2423 declare <32 x i32> @llvm.hexagon.V6.vadduhsat.dv(<32 x i32>, <32 x i32>) #1
2425 ; Function Attrs: nounwind readnone
2426 declare <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32>, i32) #1
2428 ; Function Attrs: nounwind readnone
2429 declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #1
2431 ; Function Attrs: nounwind readnone
2432 declare <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32>, <16 x i32>, i32) #1
2434 ; Function Attrs: nounwind readnone
2435 declare <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2437 ; Function Attrs: nounwind readnone
2438 declare <32 x i32> @llvm.hexagon.V6.vsubuhsat.dv(<32 x i32>, <32 x i32>) #1
2440 ; Function Attrs: nounwind readnone
2441 declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #1
2443 ; Function Attrs: nounwind readnone
2444 declare <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32>) #1
2446 ; Function Attrs: nounwind readnone
2447 declare <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32>, i32) #1
2449 ; Function Attrs: nounwind readnone
2450 declare <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32>, <32 x i32>, i32) #1
2452 ; Function Attrs: nounwind readnone
2453 declare <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32>, i32) #1
2455 ; Function Attrs: nounwind readnone
2456 declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #1
2458 ; Function Attrs: nounwind readnone
2459 declare <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32>, <16 x i32>, i32) #1
2461 ; Function Attrs: nounwind readnone
2462 declare <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2464 ; Function Attrs: nounwind readnone
2465 declare <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32>, i32, i32) #1
2467 ; Function Attrs: nounwind readnone
2468 declare <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32>, <32 x i32>, i32, i32) #1
2470 ; Function Attrs: nounwind readnone
2471 declare <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32>, i32, i32) #1
2473 ; Function Attrs: nounwind readnone
2474 declare <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32>, <32 x i32>, i32, i32) #1
2476 ; Function Attrs: nounwind readnone
2477 declare <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32>) #1
2479 ; Function Attrs: nounwind readnone
2480 declare <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32>) #1
2482 ; Function Attrs: nounwind readnone
2483 declare <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32>, <16 x i32>) #1
2485 ; Function Attrs: nounwind readnone
2486 declare <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32>, <16 x i32>) #1
2488 ; Function Attrs: nounwind readnone
2489 declare <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32>, <32 x i32>) #1
2491 ; Function Attrs: nounwind readnone
2492 declare <32 x i32> @llvm.hexagon.V6.vaddwsat.dv(<32 x i32>, <32 x i32>) #1
2494 ; Function Attrs: nounwind readnone
2495 declare <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32>, i32) #1
2497 ; Function Attrs: nounwind readnone
2498 declare <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32>, <32 x i32>, i32) #1
2500 ; Function Attrs: nounwind readnone
2501 declare <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32>, i32) #1
2503 ; Function Attrs: nounwind readnone
2504 declare <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32>, <32 x i32>, i32) #1
2506 ; Function Attrs: nounwind readnone
2507 declare <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32>, i32) #1
2509 ; Function Attrs: nounwind readnone
2510 declare <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32>, <16 x i32>) #1
2512 ; Function Attrs: nounwind readnone
2513 declare <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32>, <16 x i32>) #1
2515 ; Function Attrs: nounwind readnone
2516 declare <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32>, <16 x i32>, i32) #1
2518 ; Function Attrs: nounwind readnone
2519 declare <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2521 ; Function Attrs: nounwind readnone
2522 declare <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32>, <16 x i32>, <16 x i32>) #1
2524 ; Function Attrs: nounwind readnone
2525 declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #1
2527 ; Function Attrs: nounwind readnone
2528 declare <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32>, <32 x i32>, i32, i32) #1
2530 ; Function Attrs: nounwind readnone
2531 declare <32 x i32> @llvm.hexagon.V6.vsubhw(<16 x i32>, <16 x i32>) #1
2533 ; Function Attrs: nounwind readnone
2534 declare <32 x i32> @llvm.hexagon.V6.vsubuhw(<16 x i32>, <16 x i32>) #1
2536 ; Function Attrs: nounwind readnone
2537 declare <32 x i32> @llvm.hexagon.V6.vsubw.dv(<32 x i32>, <32 x i32>) #1
2539 ; Function Attrs: nounwind readnone
2540 declare <32 x i32> @llvm.hexagon.V6.vsubwsat.dv(<32 x i32>, <32 x i32>) #1
2542 ; Function Attrs: nounwind readnone
2543 declare <32 x i32> @llvm.hexagon.V6.vsh(<16 x i32>) #1
2545 ; Function Attrs: nounwind readnone
2546 declare <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32>, i32) #1
2548 ; Function Attrs: nounwind readnone
2549 declare <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32>, <32 x i32>, i32) #1
2551 ; Function Attrs: nounwind readnone
2552 declare <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32>) #1
2554 ; Function Attrs: nounwind readnone
2555 declare <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32>, <16 x i32>) #1
2557 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }
2558 attributes #1 = { nounwind readnone }