; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --version 2
; RUN: opt -S -vector-library=ArmPL -replace-with-veclib < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

;
; The replace-with-veclib pass does not work with scalable types, thus
; the mappings aren't utilised. Tests will need to be regenerated when the
; pass is improved.
;
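;
; A sketch of how these assertions could be regenerated when the pass changes
; (assumed invocation; the script path, build directory, and test-file name
; depend on your checkout). The UTC_ARGS recorded in the NOTE line above
; should be picked up again by the script:
;
;   llvm/utils/update_test_checks.py --opt-binary=<build-dir>/bin/opt <this-file>.ll
;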
declare <2 x double> @llvm.cos.v2f64(<2 x double>)
declare <4 x float> @llvm.cos.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float>)

; CHECK: @llvm.compiler.used = appending global [68 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_svcos_f64_x, ptr @armpl_svcos_f32_x, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_svexp_f64_x, ptr @armpl_svexp_f32_x, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_svexp10_f64_x, ptr @armpl_svexp10_f32_x, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_svexp2_f64_x, ptr @armpl_svexp2_f32_x, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_svlog_f64_x, ptr @armpl_svlog_f32_x, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32, ptr @armpl_svlog10_f64_x, ptr @armpl_svlog10_f32_x, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_svlog2_f64_x, ptr @armpl_svlog2_f32_x, ptr @armpl_vpowq_f64, ptr @armpl_vpowq_f32, ptr @armpl_svpow_f64_x, ptr @armpl_svpow_f32_x, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_svsin_f64_x, ptr @armpl_svsin_f32_x, ptr @armpl_vtanq_f64, ptr @armpl_vtanq_f32, ptr @armpl_svtan_f64_x, ptr @armpl_svtan_f32_x, ptr @armpl_vacosq_f64, ptr @armpl_vacosq_f32, ptr @armpl_svacos_f64_x, ptr @armpl_svacos_f32_x, ptr @armpl_vasinq_f64, ptr @armpl_vasinq_f32, ptr @armpl_svasin_f64_x, ptr @armpl_svasin_f32_x, ptr @armpl_vatanq_f64, ptr @armpl_vatanq_f32, ptr @armpl_svatan_f64_x, ptr @armpl_svatan_f32_x, ptr @armpl_vatan2q_f64, ptr @armpl_vatan2q_f32, ptr @armpl_svatan2_f64_x, ptr @armpl_svatan2_f32_x, ptr @armpl_vcoshq_f64, ptr @armpl_vcoshq_f32, ptr @armpl_svcosh_f64_x, ptr @armpl_svcosh_f32_x, ptr @armpl_vsinhq_f64, ptr @armpl_vsinhq_f32, ptr @armpl_svsinh_f64_x, ptr @armpl_svsinh_f32_x, ptr @armpl_vtanhq_f64, ptr @armpl_vtanhq_f32, ptr @armpl_svtanh_f64_x, ptr @armpl_svtanh_f32_x], section "llvm.metadata"
define <2 x double> @llvm_cos_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_cos_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vcosq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.cos.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_cos_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_cos_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vcosq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.cos.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_cos_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_cos_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_cos_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_cos_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.exp.v2f64(<2 x double>)
declare <4 x float> @llvm.exp.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexpq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexpq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp10.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp10.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp10_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp10_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexp10q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp10.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp10_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp10_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexp10q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp10.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp10_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp10_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp10.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp10_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp10_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp10.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.exp2.v2f64(<2 x double>)
declare <4 x float> @llvm.exp2.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp2.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp2.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp2_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp2_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexp2q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp2.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp2_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp2_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexp2q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp2_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp2.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp2_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp2.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.log.v2f64(<2 x double>)
declare <4 x float> @llvm.log.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlogq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlogq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.log10.v2f64(<2 x double>)
declare <4 x float> @llvm.log10.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log10.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log10.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log10_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log10_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlog10q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log10.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log10_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log10_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlog10q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log10.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log10_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log10_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log10.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log10_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log10_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log10.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.log2.v2f64(<2 x double>)
declare <4 x float> @llvm.log2.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log2.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log2.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log2_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log2_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlog2q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log2.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log2_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log2_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlog2q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log2.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log2_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log2.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log2_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log2.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
declare <vscale x 2 x double> @llvm.pow.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)

define <2 x double> @llvm_pow_f64(<2 x double> %in, <2 x double> %power) {
; CHECK-LABEL: define <2 x double> @llvm_pow_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]], <2 x double> [[POWER:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vpowq_f64(<2 x double> [[IN]], <2 x double> [[POWER]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %in, <2 x double> %power)
  ret <2 x double> %1
}

define <4 x float> @llvm_pow_f32(<4 x float> %in, <4 x float> %power) {
; CHECK-LABEL: define <4 x float> @llvm_pow_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]], <4 x float> [[POWER:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vpowq_f32(<4 x float> [[IN]], <4 x float> [[POWER]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %in, <4 x float> %power)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_pow_vscale_f64(<vscale x 2 x double> %in, <vscale x 2 x double> %power) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_pow_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]], <vscale x 2 x double> [[POWER:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x double> [[POWER]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.pow.nxv2f64(<vscale x 2 x double> %in, <vscale x 2 x double> %power)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_pow_vscale_f32(<vscale x 4 x float> %in, <vscale x 4 x float> %power) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_pow_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]], <vscale x 4 x float> [[POWER:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x float> [[POWER]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float> %in, <vscale x 4 x float> %power)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.sin.v2f64(<2 x double>)
declare <4 x float> @llvm.sin.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.sin.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.sin.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_sin_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_sin_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vsinq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.sin.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_sin_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_sin_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vsinq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.sin.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_sin_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_sin_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.sin.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_sin_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_sin_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.sin.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.tan.v2f64(<2 x double>)
declare <4 x float> @llvm.tan.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.tan.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.tan.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_tan_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_tan_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vtanq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.tan.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_tan_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_tan_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vtanq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.tan.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_tan_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_tan_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svtan_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.tan.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_tan_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_tan_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svtan_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.tan.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.acos.v2f64(<2 x double>)
declare <4 x float> @llvm.acos.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.acos.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.acos.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_acos_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_acos_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vacosq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.acos.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_acos_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_acos_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vacosq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.acos.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_acos_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_acos_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svacos_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.acos.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_acos_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_acos_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svacos_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.acos.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.asin.v2f64(<2 x double>)
declare <4 x float> @llvm.asin.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.asin.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.asin.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_asin_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_asin_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vasinq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.asin.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_asin_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_asin_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vasinq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.asin.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_asin_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_asin_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svasin_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.asin.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_asin_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_asin_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svasin_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.asin.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.atan.v2f64(<2 x double>)
declare <4 x float> @llvm.atan.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.atan.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.atan.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_atan_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_atan_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vatanq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.atan.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_atan_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_atan_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vatanq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.atan.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_atan_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_atan_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svatan_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.atan.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_atan_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_atan_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svatan_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.atan.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.atan2.v2f64(<2 x double>, <2 x double>)
declare <4 x float> @llvm.atan2.v4f32(<4 x float>, <4 x float>)
declare <vscale x 2 x double> @llvm.atan2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.atan2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)

define <2 x double> @llvm_atan2_f64(<2 x double> %in1, <2 x double> %in2) {
; CHECK-LABEL: define <2 x double> @llvm_atan2_f64
; CHECK-SAME: (<2 x double> [[IN1:%.*]], <2 x double> [[IN2:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vatan2q_f64(<2 x double> [[IN1]], <2 x double> [[IN2]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.atan2.v2f64(<2 x double> %in1, <2 x double> %in2)
  ret <2 x double> %1
}

define <4 x float> @llvm_atan2_f32(<4 x float> %in1, <4 x float> %in2) {
; CHECK-LABEL: define <4 x float> @llvm_atan2_f32
; CHECK-SAME: (<4 x float> [[IN1:%.*]], <4 x float> [[IN2:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vatan2q_f32(<4 x float> [[IN1]], <4 x float> [[IN2]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.atan2.v4f32(<4 x float> %in1, <4 x float> %in2)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_atan2_vscale_f64(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_atan2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN1:%.*]], <vscale x 2 x double> [[IN2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svatan2_f64_x(<vscale x 2 x double> [[IN1]], <vscale x 2 x double> [[IN2]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.atan2.nxv2f64(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_atan2_vscale_f32(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_atan2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN1:%.*]], <vscale x 4 x float> [[IN2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svatan2_f32_x(<vscale x 4 x float> [[IN1]], <vscale x 4 x float> [[IN2]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.atan2.nxv4f32(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.cosh.v2f64(<2 x double>)
declare <4 x float> @llvm.cosh.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.cosh.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.cosh.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_cosh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_cosh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vcoshq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.cosh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_cosh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_cosh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vcoshq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.cosh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_cosh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_cosh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svcosh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.cosh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_cosh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_cosh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svcosh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.cosh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.sinh.v2f64(<2 x double>)
declare <4 x float> @llvm.sinh.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.sinh.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.sinh.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_sinh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_sinh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vsinhq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.sinh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_sinh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_sinh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vsinhq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.sinh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_sinh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_sinh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svsinh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.sinh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_sinh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_sinh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svsinh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.sinh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
declare <2 x double> @llvm.tanh.v2f64(<2 x double>)
declare <4 x float> @llvm.tanh.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.tanh.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.tanh.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_tanh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_tanh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vtanhq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.tanh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_tanh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_tanh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vtanhq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.tanh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_tanh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_tanh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svtanh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.tanh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_tanh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_tanh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svtanh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.tanh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
attributes #0 = { "target-features"="+sve" }

; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; CHECK: attributes #[[ATTR1]] = { "target-features"="+sve" }