1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
3 ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
4 ; RUN: --check-prefixes=CHECK,ZVFH
5 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
6 ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
7 ; RUN: --check-prefixes=CHECK,ZVFH
8 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
9 ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
10 ; RUN: --check-prefixes=CHECK,ZVFHMIN
11 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
12 ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
13 ; RUN: --check-prefixes=CHECK,ZVFHMIN
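; This file tests lowering of llvm.copysign (and copysign of fneg) for
; scalable bf16, f16, f32 and f64 vectors. With +zvfh the f16 cases select
; vfsgnj/vfsgnjn, while the bf16 cases and the +zvfhmin configuration are
; expanded to integer sign-bit manipulation on the 16-bit elements.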
15 define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) {
16 ; CHECK-LABEL: nxv1bf16:
18 ; CHECK-NEXT: lui a0, 8
19 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
20 ; CHECK-NEXT: vand.vx v9, v9, a0
21 ; CHECK-NEXT: addi a0, a0, -1
22 ; CHECK-NEXT: vand.vx v8, v8, a0
23 ; CHECK-NEXT: vor.vv v8, v8, v9
25 %r = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs)
26 ret <vscale x 1 x bfloat> %r
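; In the expansion above, lui a0, 8 materializes 0x8000 (the sign bit of a
; 16-bit element) and addi a0, a0, -1 turns it into the 0x7fff magnitude mask,
; so the result is (%vm & 0x7fff) | (%vs & 0x8000).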
29 define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) {
30 ; CHECK-LABEL: nxv2bf16:
32 ; CHECK-NEXT: lui a0, 8
33 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
34 ; CHECK-NEXT: vand.vx v9, v9, a0
35 ; CHECK-NEXT: addi a0, a0, -1
36 ; CHECK-NEXT: vand.vx v8, v8, a0
37 ; CHECK-NEXT: vor.vv v8, v8, v9
39 %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs)
40 ret <vscale x 2 x bfloat> %r
43 define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) {
44 ; CHECK-LABEL: nxv4bf16:
46 ; CHECK-NEXT: lui a0, 8
47 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
48 ; CHECK-NEXT: vand.vx v9, v9, a0
49 ; CHECK-NEXT: addi a0, a0, -1
50 ; CHECK-NEXT: vand.vx v8, v8, a0
51 ; CHECK-NEXT: vor.vv v8, v8, v9
53 %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs)
54 ret <vscale x 4 x bfloat> %r
57 define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) {
58 ; CHECK-LABEL: nxv8bf16:
60 ; CHECK-NEXT: lui a0, 8
61 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
62 ; CHECK-NEXT: vand.vx v10, v10, a0
63 ; CHECK-NEXT: addi a0, a0, -1
64 ; CHECK-NEXT: vand.vx v8, v8, a0
65 ; CHECK-NEXT: vor.vv v8, v8, v10
67 %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs)
68 ret <vscale x 8 x bfloat> %r
71 define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) {
72 ; CHECK-LABEL: nxv16bf16:
74 ; CHECK-NEXT: lui a0, 8
75 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
76 ; CHECK-NEXT: vand.vx v12, v12, a0
77 ; CHECK-NEXT: addi a0, a0, -1
78 ; CHECK-NEXT: vand.vx v8, v8, a0
79 ; CHECK-NEXT: vor.vv v8, v8, v12
81 %r = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs)
82 ret <vscale x 16 x bfloat> %r
85 define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) {
86 ; CHECK-LABEL: nxv32bf16:
88 ; CHECK-NEXT: lui a0, 8
89 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
90 ; CHECK-NEXT: vand.vx v16, v16, a0
91 ; CHECK-NEXT: addi a0, a0, -1
92 ; CHECK-NEXT: vand.vx v8, v8, a0
93 ; CHECK-NEXT: vor.vv v8, v8, v16
95 %r = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs)
96 ret <vscale x 32 x bfloat> %r
99 declare <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>)
101 define <vscale x 1 x half> @vfcopysign_vv_nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs) {
102 ; ZVFH-LABEL: vfcopysign_vv_nxv1f16:
104 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
105 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v9
108 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv1f16:
110 ; ZVFHMIN-NEXT: lui a0, 8
111 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
112 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
113 ; ZVFHMIN-NEXT: addi a0, a0, -1
114 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
115 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
117 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs)
118 ret <vscale x 1 x half> %r
121 define <vscale x 1 x half> @vfcopysign_vf_nxv1f16(<vscale x 1 x half> %vm, half %s) {
122 ; ZVFH-LABEL: vfcopysign_vf_nxv1f16:
124 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
125 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
128 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv1f16:
130 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
131 ; ZVFHMIN-NEXT: lui a1, 8
132 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
133 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
134 ; ZVFHMIN-NEXT: addi a0, a1, -1
135 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
136 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
137 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
139 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
140 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
141 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %splat)
142 ret <vscale x 1 x half> %r
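; Under +zvfhmin the scalar-splat form moves the half operand to a GPR with
; fmv.x.h and broadcasts it with vmv.v.x before the same mask-and-or sequence;
; +zvfh keeps it as a single vfsgnj.vf.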
145 define <vscale x 1 x half> @vfcopynsign_vv_nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs) {
146 ; ZVFH-LABEL: vfcopynsign_vv_nxv1f16:
148 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
149 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v9
152 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv1f16:
154 ; ZVFHMIN-NEXT: lui a0, 8
155 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
156 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a0
157 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
158 ; ZVFHMIN-NEXT: addi a0, a0, -1
159 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
160 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
162 %n = fneg <vscale x 1 x half> %vs
163 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %n)
164 ret <vscale x 1 x half> %r
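; copysign of an fneg is selected as vfsgnjn under +zvfh; under +zvfhmin the
; fneg appears as an extra vxor.vx with the 0x8000 sign mask ahead of the
; masking sequence.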
167 define <vscale x 1 x half> @vfcopynsign_vf_nxv1f16(<vscale x 1 x half> %vm, half %s) {
168 ; ZVFH-LABEL: vfcopynsign_vf_nxv1f16:
170 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
171 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
174 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv1f16:
176 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
177 ; ZVFHMIN-NEXT: lui a1, 8
178 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
179 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
180 ; ZVFHMIN-NEXT: addi a0, a1, -1
181 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
182 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
183 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
184 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
186 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
187 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
188 %n = fneg <vscale x 1 x half> %splat
189 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %n)
190 ret <vscale x 1 x half> %r
193 define <vscale x 1 x half> @vfcopysign_exttrunc_vv_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, <vscale x 1 x float> %vs) {
194 ; ZVFH-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f32:
196 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
197 ; ZVFH-NEXT: vfncvt.f.f.w v10, v9
198 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
201 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f32:
203 ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
204 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v9
205 ; ZVFHMIN-NEXT: lui a0, 8
206 ; ZVFHMIN-NEXT: vand.vx v9, v10, a0
207 ; ZVFHMIN-NEXT: addi a0, a0, -1
208 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
209 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
211 %e = fptrunc <vscale x 1 x float> %vs to <vscale x 1 x half>
212 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %e)
213 ret <vscale x 1 x half> %r
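; When the sign source is an f32 vector it is first narrowed to f16 with
; vfncvt.f.f.w; both configurations then take the sign from the narrowed
; value.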
216 define <vscale x 1 x half> @vfcopysign_exttrunc_vf_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, float %s) {
217 ; ZVFH-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f32:
219 ; ZVFH-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
220 ; ZVFH-NEXT: vfmv.v.f v9, fa0
221 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
222 ; ZVFH-NEXT: vfncvt.f.f.w v10, v9
223 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
226 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f32:
228 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
229 ; ZVFHMIN-NEXT: vfmv.v.f v9, fa0
230 ; ZVFHMIN-NEXT: lui a0, 8
231 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
232 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v9
233 ; ZVFHMIN-NEXT: addi a1, a0, -1
234 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
235 ; ZVFHMIN-NEXT: vand.vx v9, v10, a0
236 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
238 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
239 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
240 %esplat = fptrunc <vscale x 1 x float> %splat to <vscale x 1 x half>
241 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %esplat)
242 ret <vscale x 1 x half> %r
245 define <vscale x 1 x half> @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, <vscale x 1 x float> %vs) {
246 ; ZVFH-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32:
248 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
249 ; ZVFH-NEXT: vfncvt.f.f.w v10, v9
250 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
253 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32:
255 ; ZVFHMIN-NEXT: lui a0, 8
256 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
257 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v9
258 ; ZVFHMIN-NEXT: addi a1, a0, -1
259 ; ZVFHMIN-NEXT: vxor.vx v9, v10, a0
260 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
261 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
262 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
264 %n = fneg <vscale x 1 x float> %vs
265 %eneg = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
266 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
267 ret <vscale x 1 x half> %r
270 define <vscale x 1 x half> @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, float %s) {
271 ; ZVFH-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32:
273 ; ZVFH-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
274 ; ZVFH-NEXT: vfmv.v.f v9, fa0
275 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
276 ; ZVFH-NEXT: vfncvt.f.f.w v10, v9
277 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
280 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32:
282 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
283 ; ZVFHMIN-NEXT: vfmv.v.f v9, fa0
284 ; ZVFHMIN-NEXT: lui a0, 8
285 ; ZVFHMIN-NEXT: addi a1, a0, -1
286 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
287 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v9
288 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
289 ; ZVFHMIN-NEXT: vxor.vx v9, v10, a0
290 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
291 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
293 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
294 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
295 %n = fneg <vscale x 1 x float> %splat
296 %eneg = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
297 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
298 ret <vscale x 1 x half> %r
301 define <vscale x 1 x half> @vfcopysign_exttrunc_vv_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, <vscale x 1 x double> %vs) {
302 ; ZVFH-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f64:
304 ; ZVFH-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
305 ; ZVFH-NEXT: vfncvt.rod.f.f.w v10, v9
306 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
307 ; ZVFH-NEXT: vfncvt.f.f.w v9, v10
308 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v9
311 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f64:
313 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
314 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v10, v9
315 ; ZVFHMIN-NEXT: lui a0, 8
316 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
317 ; ZVFHMIN-NEXT: vfncvt.f.f.w v9, v10
318 ; ZVFHMIN-NEXT: addi a1, a0, -1
319 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
320 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
321 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
323 %e = fptrunc <vscale x 1 x double> %vs to <vscale x 1 x half>
324 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %e)
325 ret <vscale x 1 x half> %r
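; An f64 sign source goes through the generic two-step fptrunc lowering,
; vfncvt.rod.f.f.w (round to odd) to f32 followed by vfncvt.f.f.w to f16,
; which avoids double rounding, before the sign bit is extracted.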
328 define <vscale x 1 x half> @vfcopysign_exttrunc_vf_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, double %s) {
329 ; ZVFH-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f64:
331 ; ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
332 ; ZVFH-NEXT: vfmv.v.f v9, fa0
333 ; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
334 ; ZVFH-NEXT: vfncvt.rod.f.f.w v10, v9
335 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
336 ; ZVFH-NEXT: vfncvt.f.f.w v9, v10
337 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v9
340 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f64:
342 ; ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
343 ; ZVFHMIN-NEXT: vfmv.v.f v9, fa0
344 ; ZVFHMIN-NEXT: lui a0, 8
345 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
346 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v10, v9
347 ; ZVFHMIN-NEXT: addi a1, a0, -1
348 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
349 ; ZVFHMIN-NEXT: vfncvt.f.f.w v9, v10
350 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
351 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
352 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
354 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
355 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
356 %esplat = fptrunc <vscale x 1 x double> %splat to <vscale x 1 x half>
357 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %esplat)
358 ret <vscale x 1 x half> %r
361 define <vscale x 1 x half> @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, <vscale x 1 x double> %vs) {
362 ; ZVFH-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64:
364 ; ZVFH-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
365 ; ZVFH-NEXT: vfncvt.rod.f.f.w v10, v9
366 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
367 ; ZVFH-NEXT: vfncvt.f.f.w v9, v10
368 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v9
371 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64:
373 ; ZVFHMIN-NEXT: lui a0, 8
374 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
375 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v10, v9
376 ; ZVFHMIN-NEXT: addi a1, a0, -1
377 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
378 ; ZVFHMIN-NEXT: vfncvt.f.f.w v9, v10
379 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
380 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a0
381 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
382 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
384 %n = fneg <vscale x 1 x double> %vs
385 %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x half>
386 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
387 ret <vscale x 1 x half> %r
390 define <vscale x 1 x half> @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, double %s) {
391 ; ZVFH-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64:
393 ; ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
394 ; ZVFH-NEXT: vfmv.v.f v9, fa0
395 ; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
396 ; ZVFH-NEXT: vfncvt.rod.f.f.w v10, v9
397 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
398 ; ZVFH-NEXT: vfncvt.f.f.w v9, v10
399 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v9
402 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64:
404 ; ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
405 ; ZVFHMIN-NEXT: vfmv.v.f v9, fa0
406 ; ZVFHMIN-NEXT: lui a0, 8
407 ; ZVFHMIN-NEXT: addi a1, a0, -1
408 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
409 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v10, v9
410 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
411 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
412 ; ZVFHMIN-NEXT: vfncvt.f.f.w v9, v10
413 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a0
414 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
415 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
417 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
418 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
419 %n = fneg <vscale x 1 x double> %splat
420 %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x half>
421 %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
422 ret <vscale x 1 x half> %r
425 declare <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
427 define <vscale x 2 x half> @vfcopysign_vv_nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs) {
428 ; ZVFH-LABEL: vfcopysign_vv_nxv2f16:
430 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
431 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v9
434 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv2f16:
436 ; ZVFHMIN-NEXT: lui a0, 8
437 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
438 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
439 ; ZVFHMIN-NEXT: addi a0, a0, -1
440 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
441 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
443 %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs)
444 ret <vscale x 2 x half> %r
447 define <vscale x 2 x half> @vfcopysign_vf_nxv2f16(<vscale x 2 x half> %vm, half %s) {
448 ; ZVFH-LABEL: vfcopysign_vf_nxv2f16:
450 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
451 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
454 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv2f16:
456 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
457 ; ZVFHMIN-NEXT: lui a1, 8
458 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
459 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
460 ; ZVFHMIN-NEXT: addi a0, a1, -1
461 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
462 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
463 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
465 %head = insertelement <vscale x 2 x half> poison, half %s, i32 0
466 %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
467 %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %splat)
468 ret <vscale x 2 x half> %r
471 define <vscale x 2 x half> @vfcopynsign_vv_nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs) {
472 ; ZVFH-LABEL: vfcopynsign_vv_nxv2f16:
474 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
475 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v9
478 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv2f16:
480 ; ZVFHMIN-NEXT: lui a0, 8
481 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
482 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a0
483 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
484 ; ZVFHMIN-NEXT: addi a0, a0, -1
485 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
486 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
488 %n = fneg <vscale x 2 x half> %vs
489 %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %n)
490 ret <vscale x 2 x half> %r
493 define <vscale x 2 x half> @vfcopynsign_vf_nxv2f16(<vscale x 2 x half> %vm, half %s) {
494 ; ZVFH-LABEL: vfcopynsign_vf_nxv2f16:
496 ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
497 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
500 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv2f16:
502 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
503 ; ZVFHMIN-NEXT: lui a1, 8
504 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
505 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
506 ; ZVFHMIN-NEXT: addi a0, a1, -1
507 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
508 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
509 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
510 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
512 %head = insertelement <vscale x 2 x half> poison, half %s, i32 0
513 %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
514 %n = fneg <vscale x 2 x half> %splat
515 %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %n)
516 ret <vscale x 2 x half> %r
519 declare <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
521 define <vscale x 4 x half> @vfcopysign_vv_nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs) {
522 ; ZVFH-LABEL: vfcopysign_vv_nxv4f16:
524 ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
525 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v9
528 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv4f16:
530 ; ZVFHMIN-NEXT: lui a0, 8
531 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
532 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
533 ; ZVFHMIN-NEXT: addi a0, a0, -1
534 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
535 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
537 %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs)
538 ret <vscale x 4 x half> %r
541 define <vscale x 4 x half> @vfcopysign_vf_nxv4f16(<vscale x 4 x half> %vm, half %s) {
542 ; ZVFH-LABEL: vfcopysign_vf_nxv4f16:
544 ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
545 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
548 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv4f16:
550 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
551 ; ZVFHMIN-NEXT: lui a1, 8
552 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
553 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
554 ; ZVFHMIN-NEXT: addi a0, a1, -1
555 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
556 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
557 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
559 %head = insertelement <vscale x 4 x half> poison, half %s, i32 0
560 %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
561 %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %splat)
562 ret <vscale x 4 x half> %r
565 define <vscale x 4 x half> @vfcopynsign_vv_nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs) {
566 ; ZVFH-LABEL: vfcopynsign_vv_nxv4f16:
568 ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
569 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v9
572 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv4f16:
574 ; ZVFHMIN-NEXT: lui a0, 8
575 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
576 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a0
577 ; ZVFHMIN-NEXT: vand.vx v9, v9, a0
578 ; ZVFHMIN-NEXT: addi a0, a0, -1
579 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
580 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
582 %n = fneg <vscale x 4 x half> %vs
583 %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %n)
584 ret <vscale x 4 x half> %r
587 define <vscale x 4 x half> @vfcopynsign_vf_nxv4f16(<vscale x 4 x half> %vm, half %s) {
588 ; ZVFH-LABEL: vfcopynsign_vf_nxv4f16:
590 ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
591 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
594 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv4f16:
596 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
597 ; ZVFHMIN-NEXT: lui a1, 8
598 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
599 ; ZVFHMIN-NEXT: vmv.v.x v9, a0
600 ; ZVFHMIN-NEXT: addi a0, a1, -1
601 ; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
602 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
603 ; ZVFHMIN-NEXT: vand.vx v9, v9, a1
604 ; ZVFHMIN-NEXT: vor.vv v8, v8, v9
606 %head = insertelement <vscale x 4 x half> poison, half %s, i32 0
607 %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
608 %n = fneg <vscale x 4 x half> %splat
609 %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %n)
610 ret <vscale x 4 x half> %r
613 declare <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
615 define <vscale x 8 x half> @vfcopysign_vv_nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs) {
616 ; ZVFH-LABEL: vfcopysign_vv_nxv8f16:
618 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
619 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
622 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv8f16:
624 ; ZVFHMIN-NEXT: lui a0, 8
625 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
626 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
627 ; ZVFHMIN-NEXT: addi a0, a0, -1
628 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
629 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
631 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs)
632 ret <vscale x 8 x half> %r
635 define <vscale x 8 x half> @vfcopysign_vf_nxv8f16(<vscale x 8 x half> %vm, half %s) {
636 ; ZVFH-LABEL: vfcopysign_vf_nxv8f16:
638 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
639 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
642 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv8f16:
644 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
645 ; ZVFHMIN-NEXT: lui a1, 8
646 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
647 ; ZVFHMIN-NEXT: vmv.v.x v10, a0
648 ; ZVFHMIN-NEXT: addi a0, a1, -1
649 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
650 ; ZVFHMIN-NEXT: vand.vx v10, v10, a1
651 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
653 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
654 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
655 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %splat)
656 ret <vscale x 8 x half> %r
659 define <vscale x 8 x half> @vfcopynsign_vv_nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs) {
660 ; ZVFH-LABEL: vfcopynsign_vv_nxv8f16:
662 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
663 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
666 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv8f16:
668 ; ZVFHMIN-NEXT: lui a0, 8
669 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
670 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a0
671 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
672 ; ZVFHMIN-NEXT: addi a0, a0, -1
673 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
674 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
676 %n = fneg <vscale x 8 x half> %vs
677 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %n)
678 ret <vscale x 8 x half> %r
681 define <vscale x 8 x half> @vfcopynsign_vf_nxv8f16(<vscale x 8 x half> %vm, half %s) {
682 ; ZVFH-LABEL: vfcopynsign_vf_nxv8f16:
684 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
685 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
688 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv8f16:
690 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
691 ; ZVFHMIN-NEXT: lui a1, 8
692 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
693 ; ZVFHMIN-NEXT: vmv.v.x v10, a0
694 ; ZVFHMIN-NEXT: addi a0, a1, -1
695 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
696 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
697 ; ZVFHMIN-NEXT: vand.vx v10, v10, a1
698 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
700 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
701 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
702 %n = fneg <vscale x 8 x half> %splat
703 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %n)
704 ret <vscale x 8 x half> %r
707 define <vscale x 8 x half> @vfcopysign_exttrunc_vv_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, <vscale x 8 x float> %vs) {
708 ; ZVFH-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f32:
710 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
711 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
712 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
715 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f32:
717 ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
718 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
719 ; ZVFHMIN-NEXT: lui a0, 8
720 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
721 ; ZVFHMIN-NEXT: addi a0, a0, -1
722 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
723 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
725 %e = fptrunc <vscale x 8 x float> %vs to <vscale x 8 x half>
726 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %e)
727 ret <vscale x 8 x half> %r
730 define <vscale x 8 x half> @vfcopysign_exttrunc_vf_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, float %s) {
731 ; ZVFH-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f32:
733 ; ZVFH-NEXT: vsetvli a0, zero, e32, m4, ta, ma
734 ; ZVFH-NEXT: vfmv.v.f v12, fa0
735 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
736 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
737 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
740 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f32:
742 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m4, ta, ma
743 ; ZVFHMIN-NEXT: vfmv.v.f v12, fa0
744 ; ZVFHMIN-NEXT: lui a0, 8
745 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
746 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
747 ; ZVFHMIN-NEXT: addi a1, a0, -1
748 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
749 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
750 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
752 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
753 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
754 %esplat = fptrunc <vscale x 8 x float> %splat to <vscale x 8 x half>
755 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %esplat)
756 ret <vscale x 8 x half> %r
759 define <vscale x 8 x half> @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, <vscale x 8 x float> %vs) {
760 ; ZVFH-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32:
762 ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
763 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
764 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
767 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32:
769 ; ZVFHMIN-NEXT: lui a0, 8
770 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
771 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
772 ; ZVFHMIN-NEXT: addi a1, a0, -1
773 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a0
774 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
775 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
776 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
778 %n = fneg <vscale x 8 x float> %vs
779 %eneg = fptrunc <vscale x 8 x float> %n to <vscale x 8 x half>
780 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
781 ret <vscale x 8 x half> %r
784 define <vscale x 8 x half> @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, float %s) {
785 ; ZVFH-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32:
787 ; ZVFH-NEXT: vsetvli a0, zero, e32, m4, ta, ma
788 ; ZVFH-NEXT: vfmv.v.f v12, fa0
789 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
790 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
791 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
794 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32:
796 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m4, ta, ma
797 ; ZVFHMIN-NEXT: vfmv.v.f v12, fa0
798 ; ZVFHMIN-NEXT: lui a0, 8
799 ; ZVFHMIN-NEXT: addi a1, a0, -1
800 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
801 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
802 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
803 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a0
804 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
805 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
807 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
808 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
809 %n = fneg <vscale x 8 x float> %splat
810 %eneg = fptrunc <vscale x 8 x float> %n to <vscale x 8 x half>
811 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
812 ret <vscale x 8 x half> %r
815 define <vscale x 8 x half> @vfcopysign_exttrunc_vv_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, <vscale x 8 x double> %vs) {
816 ; ZVFH-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f64:
818 ; ZVFH-NEXT: vsetvli a0, zero, e32, m4, ta, ma
819 ; ZVFH-NEXT: vfncvt.rod.f.f.w v12, v16
820 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
821 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
822 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
825 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f64:
827 ; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m4, ta, ma
828 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v12, v16
829 ; ZVFHMIN-NEXT: lui a0, 8
830 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
831 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
832 ; ZVFHMIN-NEXT: addi a1, a0, -1
833 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
834 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
835 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
837 %e = fptrunc <vscale x 8 x double> %vs to <vscale x 8 x half>
838 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %e)
839 ret <vscale x 8 x half> %r
842 define <vscale x 8 x half> @vfcopysign_exttrunc_vf_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, double %s) {
843 ; ZVFH-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f64:
845 ; ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
846 ; ZVFH-NEXT: vfmv.v.f v16, fa0
847 ; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
848 ; ZVFH-NEXT: vfncvt.rod.f.f.w v12, v16
849 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
850 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
851 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v10
854 ; ZVFHMIN-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f64:
856 ; ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
857 ; ZVFHMIN-NEXT: vfmv.v.f v16, fa0
858 ; ZVFHMIN-NEXT: lui a0, 8
859 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
860 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v12, v16
861 ; ZVFHMIN-NEXT: addi a1, a0, -1
862 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
863 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
864 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
865 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
866 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
868 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
869 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
870 %esplat = fptrunc <vscale x 8 x double> %splat to <vscale x 8 x half>
871 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %esplat)
872 ret <vscale x 8 x half> %r
875 define <vscale x 8 x half> @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, <vscale x 8 x double> %vs) {
876 ; ZVFH-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64:
878 ; ZVFH-NEXT: vsetvli a0, zero, e32, m4, ta, ma
879 ; ZVFH-NEXT: vfncvt.rod.f.f.w v12, v16
880 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
881 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
882 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
885 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64:
887 ; ZVFHMIN-NEXT: lui a0, 8
888 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m4, ta, ma
889 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v12, v16
890 ; ZVFHMIN-NEXT: addi a1, a0, -1
891 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
892 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
893 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
894 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a0
895 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
896 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
898 %n = fneg <vscale x 8 x double> %vs
899 %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x half>
900 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
901 ret <vscale x 8 x half> %r
904 define <vscale x 8 x half> @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, double %s) {
905 ; ZVFH-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64:
907 ; ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
908 ; ZVFH-NEXT: vfmv.v.f v16, fa0
909 ; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
910 ; ZVFH-NEXT: vfncvt.rod.f.f.w v12, v16
911 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
912 ; ZVFH-NEXT: vfncvt.f.f.w v10, v12
913 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v10
916 ; ZVFHMIN-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64:
918 ; ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
919 ; ZVFHMIN-NEXT: vfmv.v.f v16, fa0
920 ; ZVFHMIN-NEXT: lui a0, 8
921 ; ZVFHMIN-NEXT: addi a1, a0, -1
922 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
923 ; ZVFHMIN-NEXT: vfncvt.rod.f.f.w v12, v16
924 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
925 ; ZVFHMIN-NEXT: vand.vx v8, v8, a1
926 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v12
927 ; ZVFHMIN-NEXT: vxor.vx v10, v10, a0
928 ; ZVFHMIN-NEXT: vand.vx v10, v10, a0
929 ; ZVFHMIN-NEXT: vor.vv v8, v8, v10
931 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
932 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
933 %n = fneg <vscale x 8 x double> %splat
934 %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x half>
935 %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
936 ret <vscale x 8 x half> %r
939 declare <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
941 define <vscale x 16 x half> @vfcopysign_vv_nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs) {
942 ; ZVFH-LABEL: vfcopysign_vv_nxv16f16:
944 ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
945 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v12
948 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv16f16:
950 ; ZVFHMIN-NEXT: lui a0, 8
951 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
952 ; ZVFHMIN-NEXT: vand.vx v12, v12, a0
953 ; ZVFHMIN-NEXT: addi a0, a0, -1
954 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
955 ; ZVFHMIN-NEXT: vor.vv v8, v8, v12
957 %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs)
958 ret <vscale x 16 x half> %r
961 define <vscale x 16 x half> @vfcopysign_vf_nxv16f16(<vscale x 16 x half> %vm, half %s) {
962 ; ZVFH-LABEL: vfcopysign_vf_nxv16f16:
964 ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
965 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
968 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv16f16:
970 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
971 ; ZVFHMIN-NEXT: lui a1, 8
972 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
973 ; ZVFHMIN-NEXT: vmv.v.x v12, a0
974 ; ZVFHMIN-NEXT: addi a0, a1, -1
975 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
976 ; ZVFHMIN-NEXT: vand.vx v12, v12, a1
977 ; ZVFHMIN-NEXT: vor.vv v8, v8, v12
979 %head = insertelement <vscale x 16 x half> poison, half %s, i32 0
980 %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
981 %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %splat)
982 ret <vscale x 16 x half> %r
985 define <vscale x 16 x half> @vfcopynsign_vv_nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs) {
986 ; ZVFH-LABEL: vfcopynsign_vv_nxv16f16:
988 ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
989 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v12
992 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv16f16:
994 ; ZVFHMIN-NEXT: lui a0, 8
995 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
996 ; ZVFHMIN-NEXT: vxor.vx v12, v12, a0
997 ; ZVFHMIN-NEXT: vand.vx v12, v12, a0
998 ; ZVFHMIN-NEXT: addi a0, a0, -1
999 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1000 ; ZVFHMIN-NEXT: vor.vv v8, v8, v12
1002 %n = fneg <vscale x 16 x half> %vs
1003 %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %n)
1004 ret <vscale x 16 x half> %r
1007 define <vscale x 16 x half> @vfcopynsign_vf_nxv16f16(<vscale x 16 x half> %vm, half %s) {
1008 ; ZVFH-LABEL: vfcopynsign_vf_nxv16f16:
1010 ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
1011 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
1014 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv16f16:
1016 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
1017 ; ZVFHMIN-NEXT: lui a1, 8
1018 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
1019 ; ZVFHMIN-NEXT: vmv.v.x v12, a0
1020 ; ZVFHMIN-NEXT: addi a0, a1, -1
1021 ; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
1022 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1023 ; ZVFHMIN-NEXT: vand.vx v12, v12, a1
1024 ; ZVFHMIN-NEXT: vor.vv v8, v8, v12
1026 %head = insertelement <vscale x 16 x half> poison, half %s, i32 0
1027 %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
1028 %n = fneg <vscale x 16 x half> %splat
1029 %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %n)
1030 ret <vscale x 16 x half> %r
1033 declare <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>)
1035 define <vscale x 32 x half> @vfcopysign_vv_nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs) {
1036 ; ZVFH-LABEL: vfcopysign_vv_nxv32f16:
1038 ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1039 ; ZVFH-NEXT: vfsgnj.vv v8, v8, v16
1042 ; ZVFHMIN-LABEL: vfcopysign_vv_nxv32f16:
1044 ; ZVFHMIN-NEXT: lui a0, 8
1045 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1046 ; ZVFHMIN-NEXT: vand.vx v16, v16, a0
1047 ; ZVFHMIN-NEXT: addi a0, a0, -1
1048 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1049 ; ZVFHMIN-NEXT: vor.vv v8, v8, v16
1051 %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs)
1052 ret <vscale x 32 x half> %r
1055 define <vscale x 32 x half> @vfcopysign_vf_nxv32f16(<vscale x 32 x half> %vm, half %s) {
1056 ; ZVFH-LABEL: vfcopysign_vf_nxv32f16:
1058 ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1059 ; ZVFH-NEXT: vfsgnj.vf v8, v8, fa0
1062 ; ZVFHMIN-LABEL: vfcopysign_vf_nxv32f16:
1064 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
1065 ; ZVFHMIN-NEXT: lui a1, 8
1066 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma
1067 ; ZVFHMIN-NEXT: vmv.v.x v16, a0
1068 ; ZVFHMIN-NEXT: addi a0, a1, -1
1069 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1070 ; ZVFHMIN-NEXT: vand.vx v16, v16, a1
1071 ; ZVFHMIN-NEXT: vor.vv v8, v8, v16
1073 %head = insertelement <vscale x 32 x half> poison, half %s, i32 0
1074 %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
1075 %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %splat)
1076 ret <vscale x 32 x half> %r
1079 define <vscale x 32 x half> @vfcopynsign_vv_nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs) {
1080 ; ZVFH-LABEL: vfcopynsign_vv_nxv32f16:
1082 ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1083 ; ZVFH-NEXT: vfsgnjn.vv v8, v8, v16
1086 ; ZVFHMIN-LABEL: vfcopynsign_vv_nxv32f16:
1088 ; ZVFHMIN-NEXT: lui a0, 8
1089 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1090 ; ZVFHMIN-NEXT: vxor.vx v16, v16, a0
1091 ; ZVFHMIN-NEXT: vand.vx v16, v16, a0
1092 ; ZVFHMIN-NEXT: addi a0, a0, -1
1093 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1094 ; ZVFHMIN-NEXT: vor.vv v8, v8, v16
1096 %n = fneg <vscale x 32 x half> %vs
1097 %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %n)
1098 ret <vscale x 32 x half> %r
1101 define <vscale x 32 x half> @vfcopynsign_vf_nxv32f16(<vscale x 32 x half> %vm, half %s) {
1102 ; ZVFH-LABEL: vfcopynsign_vf_nxv32f16:
1104 ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1105 ; ZVFH-NEXT: vfsgnjn.vf v8, v8, fa0
1108 ; ZVFHMIN-LABEL: vfcopynsign_vf_nxv32f16:
1110 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
1111 ; ZVFHMIN-NEXT: lui a1, 8
1112 ; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma
1113 ; ZVFHMIN-NEXT: vmv.v.x v16, a0
1114 ; ZVFHMIN-NEXT: addi a0, a1, -1
1115 ; ZVFHMIN-NEXT: vxor.vx v16, v16, a1
1116 ; ZVFHMIN-NEXT: vand.vx v8, v8, a0
1117 ; ZVFHMIN-NEXT: vand.vx v16, v16, a1
1118 ; ZVFHMIN-NEXT: vor.vv v8, v8, v16
1120 %head = insertelement <vscale x 32 x half> poison, half %s, i32 0
1121 %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
1122 %n = fneg <vscale x 32 x half> %splat
1123 %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %n)
1124 ret <vscale x 32 x half> %r
1127 declare <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>)
1129 define <vscale x 1 x float> @vfcopysign_vv_nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs) {
1130 ; CHECK-LABEL: vfcopysign_vv_nxv1f32:
1132 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1133 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
1135 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs)
1136 ret <vscale x 1 x float> %r
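; For f32 (and f64) element types both configurations use the native
; vfsgnj/vfsgnjn instructions, so the assertions below are shared.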
1139 define <vscale x 1 x float> @vfcopysign_vf_nxv1f32(<vscale x 1 x float> %vm, float %s) {
1140 ; CHECK-LABEL: vfcopysign_vf_nxv1f32:
1142 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1143 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1145 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
1146 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
1147 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %splat)
1148 ret <vscale x 1 x float> %r
1151 define <vscale x 1 x float> @vfcopynsign_vv_nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs) {
1152 ; CHECK-LABEL: vfcopynsign_vv_nxv1f32:
1154 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1155 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
1157 %n = fneg <vscale x 1 x float> %vs
1158 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %n)
1159 ret <vscale x 1 x float> %r
1162 define <vscale x 1 x float> @vfcopynsign_vf_nxv1f32(<vscale x 1 x float> %vm, float %s) {
1163 ; CHECK-LABEL: vfcopynsign_vf_nxv1f32:
1165 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1166 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1168 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
1169 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
1170 %n = fneg <vscale x 1 x float> %splat
1171 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %n)
1172 ret <vscale x 1 x float> %r
1175 define <vscale x 1 x float> @vfcopysign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, <vscale x 1 x half> %vs) {
1176 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f16:
1178 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
1179 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1180 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1181 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1183 %e = fpext <vscale x 1 x half> %vs to <vscale x 1 x float>
1184 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %e)
1185 ret <vscale x 1 x float> %r
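; A narrower f16 sign source is widened with vfwcvt.f.f.v before the sign is
; applied with vfsgnj.vv.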
1188 define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
1189 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16:
1191 ; CHECK-NEXT: fcvt.s.h fa5, fa0
1192 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1193 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1195 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
1196 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
1197 %esplat = fpext <vscale x 1 x half> %splat to <vscale x 1 x float>
1198 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %esplat)
1199 ret <vscale x 1 x float> %r
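; When the sign source is an fpext of a scalar splat, the extension is folded
; to a scalar fcvt.s.h and the sign is applied with vfsgnj.vf/vfsgnjn.vf.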
1202 define <vscale x 1 x float> @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, <vscale x 1 x half> %vs) {
1203 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16:
1205 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
1206 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1207 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1208 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1210 %n = fneg <vscale x 1 x half> %vs
1211 %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x float>
1212 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
1213 ret <vscale x 1 x float> %r
1216 define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
1217 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16:
1219 ; CHECK-NEXT: fcvt.s.h fa5, fa0
1220 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1221 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
1223 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
1224 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
1225 %n = fneg <vscale x 1 x half> %splat
1226 %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x float>
1227 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
1228 ret <vscale x 1 x float> %r
1231 define <vscale x 1 x float> @vfcopysign_exttrunc_vv_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, <vscale x 1 x double> %vs) {
1232 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f64:
1234 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1235 ; CHECK-NEXT: vfncvt.f.f.w v10, v9
1236 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1238 %e = fptrunc <vscale x 1 x double> %vs to <vscale x 1 x float>
1239 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %e)
1240 ret <vscale x 1 x float> %r
1243 define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, double %s) {
1244 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f64:
1246 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1247 ; CHECK-NEXT: vfmv.v.f v9, fa0
1248 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1249 ; CHECK-NEXT: vfncvt.f.f.w v10, v9
1250 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1252 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
1253 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
1254 %esplat = fptrunc <vscale x 1 x double> %splat to <vscale x 1 x float>
1255 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %esplat)
1256 ret <vscale x 1 x float> %r
1259 define <vscale x 1 x float> @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, <vscale x 1 x double> %vs) {
1260 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64:
1262 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1263 ; CHECK-NEXT: vfncvt.f.f.w v10, v9
1264 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1266 %n = fneg <vscale x 1 x double> %vs
1267 %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x float>
1268 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
1269 ret <vscale x 1 x float> %r
1272 define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, double %s) {
1273 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64:
1275 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1276 ; CHECK-NEXT: vfmv.v.f v9, fa0
1277 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1278 ; CHECK-NEXT: vfncvt.f.f.w v10, v9
1279 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1281 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
1282 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
1283 %n = fneg <vscale x 1 x double> %splat
1284 %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x float>
1285 %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
1286 ret <vscale x 1 x float> %r
1289 declare <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
1291 define <vscale x 2 x float> @vfcopysign_vv_nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs) {
1292 ; CHECK-LABEL: vfcopysign_vv_nxv2f32:
1294 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1295 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
1297 %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs)
1298 ret <vscale x 2 x float> %r
1301 define <vscale x 2 x float> @vfcopysign_vf_nxv2f32(<vscale x 2 x float> %vm, float %s) {
1302 ; CHECK-LABEL: vfcopysign_vf_nxv2f32:
1304 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1305 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1307 %head = insertelement <vscale x 2 x float> poison, float %s, i32 0
1308 %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
1309 %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %splat)
1310 ret <vscale x 2 x float> %r
1313 define <vscale x 2 x float> @vfcopynsign_vv_nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs) {
1314 ; CHECK-LABEL: vfcopynsign_vv_nxv2f32:
1316 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1317 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
1319 %n = fneg <vscale x 2 x float> %vs
1320 %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %n)
1321 ret <vscale x 2 x float> %r
1324 define <vscale x 2 x float> @vfcopynsign_vf_nxv2f32(<vscale x 2 x float> %vm, float %s) {
1325 ; CHECK-LABEL: vfcopynsign_vf_nxv2f32:
1327 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1328 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1330 %head = insertelement <vscale x 2 x float> poison, float %s, i32 0
1331 %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
1332 %n = fneg <vscale x 2 x float> %splat
1333 %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %n)
1334 ret <vscale x 2 x float> %r
1337 declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
1339 define <vscale x 4 x float> @vfcopysign_vv_nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs) {
1340 ; CHECK-LABEL: vfcopysign_vv_nxv4f32:
1342 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1343 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1345 %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs)
1346 ret <vscale x 4 x float> %r
1349 define <vscale x 4 x float> @vfcopysign_vf_nxv4f32(<vscale x 4 x float> %vm, float %s) {
1350 ; CHECK-LABEL: vfcopysign_vf_nxv4f32:
1352 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1353 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1355 %head = insertelement <vscale x 4 x float> poison, float %s, i32 0
1356 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
1357 %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %splat)
1358 ret <vscale x 4 x float> %r
1361 define <vscale x 4 x float> @vfcopynsign_vv_nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs) {
1362 ; CHECK-LABEL: vfcopynsign_vv_nxv4f32:
1364 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1365 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1367 %n = fneg <vscale x 4 x float> %vs
1368 %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %n)
1369 ret <vscale x 4 x float> %r
1372 define <vscale x 4 x float> @vfcopynsign_vf_nxv4f32(<vscale x 4 x float> %vm, float %s) {
1373 ; CHECK-LABEL: vfcopynsign_vf_nxv4f32:
1375 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1376 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1378 %head = insertelement <vscale x 4 x float> poison, float %s, i32 0
1379 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
1380 %n = fneg <vscale x 4 x float> %splat
1381 %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %n)
1382 ret <vscale x 4 x float> %r
1385 declare <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
1387 define <vscale x 8 x float> @vfcopysign_vv_nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs) {
1388 ; CHECK-LABEL: vfcopysign_vv_nxv8f32:
1390 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1391 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
1393 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs)
1394 ret <vscale x 8 x float> %r
1397 define <vscale x 8 x float> @vfcopysign_vf_nxv8f32(<vscale x 8 x float> %vm, float %s) {
1398 ; CHECK-LABEL: vfcopysign_vf_nxv8f32:
1400 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1401 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1403 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
1404 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1405 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %splat)
1406 ret <vscale x 8 x float> %r
1409 define <vscale x 8 x float> @vfcopynsign_vv_nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs) {
1410 ; CHECK-LABEL: vfcopynsign_vv_nxv8f32:
1412 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1413 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
1415 %n = fneg <vscale x 8 x float> %vs
1416 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %n)
1417 ret <vscale x 8 x float> %r
1420 define <vscale x 8 x float> @vfcopynsign_vf_nxv8f32(<vscale x 8 x float> %vm, float %s) {
1421 ; CHECK-LABEL: vfcopynsign_vf_nxv8f32:
1423 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1424 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1426 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
1427 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1428 %n = fneg <vscale x 8 x float> %splat
1429 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %n)
1430 ret <vscale x 8 x float> %r
1433 define <vscale x 8 x float> @vfcopysign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, <vscale x 8 x half> %vs) {
1434 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f16:
1436 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1437 ; CHECK-NEXT: vfwcvt.f.f.v v16, v12
1438 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1439 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
1441 %e = fpext <vscale x 8 x half> %vs to <vscale x 8 x float>
1442 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %e)
1443 ret <vscale x 8 x float> %r
1446 define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
1447 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16:
1449 ; CHECK-NEXT: fcvt.s.h fa5, fa0
1450 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1451 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1453 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
1454 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1455 %esplat = fpext <vscale x 8 x half> %splat to <vscale x 8 x float>
1456 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %esplat)
1457 ret <vscale x 8 x float> %r
1460 define <vscale x 8 x float> @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, <vscale x 8 x half> %vs) {
1461 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16:
1463 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1464 ; CHECK-NEXT: vfwcvt.f.f.v v16, v12
1465 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1466 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16
1468 %n = fneg <vscale x 8 x half> %vs
1469 %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x float>
1470 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
1471 ret <vscale x 8 x float> %r
1474 define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
1475 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16:
1477 ; CHECK-NEXT: fcvt.s.h fa5, fa0
1478 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1479 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
1481 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
1482 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1483 %n = fneg <vscale x 8 x half> %splat
1484 %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x float>
1485 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
1486 ret <vscale x 8 x float> %r
1489 define <vscale x 8 x float> @vfcopysign_exttrunc_vv_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, <vscale x 8 x double> %vs) {
1490 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f64:
1492 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1493 ; CHECK-NEXT: vfncvt.f.f.w v12, v16
1494 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
1496 %e = fptrunc <vscale x 8 x double> %vs to <vscale x 8 x float>
1497 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %e)
1498 ret <vscale x 8 x float> %r
1501 define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, double %s) {
1502 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f64:
1504 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1505 ; CHECK-NEXT: vfmv.v.f v16, fa0
1506 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1507 ; CHECK-NEXT: vfncvt.f.f.w v12, v16
1508 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
1510 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
1511 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
1512 %esplat = fptrunc <vscale x 8 x double> %splat to <vscale x 8 x float>
1513 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %esplat)
1514 ret <vscale x 8 x float> %r
1517 define <vscale x 8 x float> @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, <vscale x 8 x double> %vs) {
1518 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64:
1520 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1521 ; CHECK-NEXT: vfncvt.f.f.w v12, v16
1522 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
1524 %n = fneg <vscale x 8 x double> %vs
1525 %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x float>
1526 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
1527 ret <vscale x 8 x float> %r
1530 define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, double %s) {
1531 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64:
1533 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1534 ; CHECK-NEXT: vfmv.v.f v16, fa0
1535 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1536 ; CHECK-NEXT: vfncvt.f.f.w v12, v16
1537 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
1539 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
1540 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
1541 %n = fneg <vscale x 8 x double> %splat
1542 %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x float>
1543 %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
1544 ret <vscale x 8 x float> %r
1547 declare <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
1549 define <vscale x 16 x float> @vfcopysign_vv_nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs) {
1550 ; CHECK-LABEL: vfcopysign_vv_nxv16f32:
1552 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1553 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
1555 %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs)
1556 ret <vscale x 16 x float> %r
1559 define <vscale x 16 x float> @vfcopysign_vf_nxv16f32(<vscale x 16 x float> %vm, float %s) {
1560 ; CHECK-LABEL: vfcopysign_vf_nxv16f32:
1562 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1563 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1565 %head = insertelement <vscale x 16 x float> poison, float %s, i32 0
1566 %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
1567 %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %splat)
1568 ret <vscale x 16 x float> %r
1571 define <vscale x 16 x float> @vfcopynsign_vv_nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs) {
1572 ; CHECK-LABEL: vfcopynsign_vv_nxv16f32:
1574 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1575 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16
1577 %n = fneg <vscale x 16 x float> %vs
1578 %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %n)
1579 ret <vscale x 16 x float> %r
1582 define <vscale x 16 x float> @vfcopynsign_vf_nxv16f32(<vscale x 16 x float> %vm, float %s) {
1583 ; CHECK-LABEL: vfcopynsign_vf_nxv16f32:
1585 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1586 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1588 %head = insertelement <vscale x 16 x float> poison, float %s, i32 0
1589 %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
1590 %n = fneg <vscale x 16 x float> %splat
1591 %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %n)
1592 ret <vscale x 16 x float> %r
1595 declare <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
1597 define <vscale x 1 x double> @vfcopysign_vv_nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs) {
1598 ; CHECK-LABEL: vfcopysign_vv_nxv1f64:
1600 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1601 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
1603 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs)
1604 ret <vscale x 1 x double> %r
1607 define <vscale x 1 x double> @vfcopysign_vf_nxv1f64(<vscale x 1 x double> %vm, double %s) {
1608 ; CHECK-LABEL: vfcopysign_vf_nxv1f64:
1610 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1611 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1613 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
1614 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
1615 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %splat)
1616 ret <vscale x 1 x double> %r
1619 define <vscale x 1 x double> @vfcopynsign_vv_nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs) {
1620 ; CHECK-LABEL: vfcopynsign_vv_nxv1f64:
1622 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1623 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
1625 %n = fneg <vscale x 1 x double> %vs
1626 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %n)
1627 ret <vscale x 1 x double> %r
1630 define <vscale x 1 x double> @vfcopynsign_vf_nxv1f64(<vscale x 1 x double> %vm, double %s) {
1631 ; CHECK-LABEL: vfcopynsign_vf_nxv1f64:
1633 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1634 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1636 %head = insertelement <vscale x 1 x double> poison, double %s, i32 0
1637 %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
1638 %n = fneg <vscale x 1 x double> %splat
1639 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %n)
1640 ret <vscale x 1 x double> %r
1643 define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, <vscale x 1 x half> %vs) {
1644 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f16:
1646 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
1647 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1648 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1649 ; CHECK-NEXT: vfwcvt.f.f.v v9, v10
1650 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
1651 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
1653 %e = fpext <vscale x 1 x half> %vs to <vscale x 1 x double>
1654 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %e)
1655 ret <vscale x 1 x double> %r
1658 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
1659 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16:
1661 ; CHECK-NEXT: fcvt.d.h fa5, fa0
1662 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1663 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1665 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
1666 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
1667 %esplat = fpext <vscale x 1 x half> %splat to <vscale x 1 x double>
1668 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %esplat)
1669 ret <vscale x 1 x double> %r
1672 define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, <vscale x 1 x half> %vs) {
1673 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16:
1675 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
1676 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1677 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
1678 ; CHECK-NEXT: vfwcvt.f.f.v v9, v10
1679 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
1680 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
1682 %n = fneg <vscale x 1 x half> %vs
1683 %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x double>
1684 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
1685 ret <vscale x 1 x double> %r
1688 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
1689 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16:
1691 ; CHECK-NEXT: fcvt.d.h fa5, fa0
1692 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1693 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
1695 %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
1696 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
1697 %n = fneg <vscale x 1 x half> %splat
1698 %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x double>
1699 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
1700 ret <vscale x 1 x double> %r
1703 define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, <vscale x 1 x float> %vs) {
1704 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f32:
1706 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1707 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1708 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
1709 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1711 %e = fpext <vscale x 1 x float> %vs to <vscale x 1 x double>
1712 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %e)
1713 ret <vscale x 1 x double> %r
1716 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
1717 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32:
1719 ; CHECK-NEXT: fcvt.d.s fa5, fa0
1720 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1721 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1723 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
1724 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
1725 %esplat = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
1726 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %esplat)
1727 ret <vscale x 1 x double> %r
1730 define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, <vscale x 1 x float> %vs) {
1731 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32:
1733 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1734 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9
1735 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
1736 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1738 %n = fneg <vscale x 1 x float> %vs
1739 %eneg = fpext <vscale x 1 x float> %n to <vscale x 1 x double>
1740 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
1741 ret <vscale x 1 x double> %r
1744 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
1745 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32:
1747 ; CHECK-NEXT: fcvt.d.s fa5, fa0
1748 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1749 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
1751 %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
1752 %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
1753 %n = fneg <vscale x 1 x float> %splat
1754 %eneg = fpext <vscale x 1 x float> %n to <vscale x 1 x double>
1755 %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
1756 ret <vscale x 1 x double> %r
1759 declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
1761 define <vscale x 2 x double> @vfcopysign_vv_nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs) {
1762 ; CHECK-LABEL: vfcopysign_vv_nxv2f64:
1764 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1765 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
1767 %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs)
1768 ret <vscale x 2 x double> %r
1771 define <vscale x 2 x double> @vfcopysign_vf_nxv2f64(<vscale x 2 x double> %vm, double %s) {
1772 ; CHECK-LABEL: vfcopysign_vf_nxv2f64:
1774 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1775 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1777 %head = insertelement <vscale x 2 x double> poison, double %s, i32 0
1778 %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
1779 %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %splat)
1780 ret <vscale x 2 x double> %r
1783 define <vscale x 2 x double> @vfcopynsign_vv_nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs) {
1784 ; CHECK-LABEL: vfcopynsign_vv_nxv2f64:
1786 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1787 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
1789 %n = fneg <vscale x 2 x double> %vs
1790 %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %n)
1791 ret <vscale x 2 x double> %r
1794 define <vscale x 2 x double> @vfcopynsign_vf_nxv2f64(<vscale x 2 x double> %vm, double %s) {
1795 ; CHECK-LABEL: vfcopynsign_vf_nxv2f64:
1797 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1798 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1800 %head = insertelement <vscale x 2 x double> poison, double %s, i32 0
1801 %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
1802 %n = fneg <vscale x 2 x double> %splat
1803 %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %n)
1804 ret <vscale x 2 x double> %r
1807 declare <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
1809 define <vscale x 4 x double> @vfcopysign_vv_nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs) {
1810 ; CHECK-LABEL: vfcopysign_vv_nxv4f64:
1812 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1813 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
1815 %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs)
1816 ret <vscale x 4 x double> %r
1819 define <vscale x 4 x double> @vfcopysign_vf_nxv4f64(<vscale x 4 x double> %vm, double %s) {
1820 ; CHECK-LABEL: vfcopysign_vf_nxv4f64:
1822 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1823 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1825 %head = insertelement <vscale x 4 x double> poison, double %s, i32 0
1826 %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
1827 %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %splat)
1828 ret <vscale x 4 x double> %r
1831 define <vscale x 4 x double> @vfcopynsign_vv_nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs) {
1832 ; CHECK-LABEL: vfcopynsign_vv_nxv4f64:
1834 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1835 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
1837 %n = fneg <vscale x 4 x double> %vs
1838 %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %n)
1839 ret <vscale x 4 x double> %r
1842 define <vscale x 4 x double> @vfcopynsign_vf_nxv4f64(<vscale x 4 x double> %vm, double %s) {
1843 ; CHECK-LABEL: vfcopynsign_vf_nxv4f64:
1845 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1846 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1848 %head = insertelement <vscale x 4 x double> poison, double %s, i32 0
1849 %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
1850 %n = fneg <vscale x 4 x double> %splat
1851 %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %n)
1852 ret <vscale x 4 x double> %r
1855 declare <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)
1857 define <vscale x 8 x double> @vfcopysign_vv_nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs) {
1858 ; CHECK-LABEL: vfcopysign_vv_nxv8f64:
1860 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1861 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
1863 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs)
1864 ret <vscale x 8 x double> %r
1867 define <vscale x 8 x double> @vfcopysign_vf_nxv8f64(<vscale x 8 x double> %vm, double %s) {
1868 ; CHECK-LABEL: vfcopysign_vf_nxv8f64:
1870 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1871 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1873 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
1874 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
1875 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %splat)
1876 ret <vscale x 8 x double> %r
1879 define <vscale x 8 x double> @vfcopynsign_vv_nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs) {
1880 ; CHECK-LABEL: vfcopynsign_vv_nxv8f64:
1882 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1883 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16
1885 %n = fneg <vscale x 8 x double> %vs
1886 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %n)
1887 ret <vscale x 8 x double> %r
1890 define <vscale x 8 x double> @vfcopynsign_vf_nxv8f64(<vscale x 8 x double> %vm, double %s) {
1891 ; CHECK-LABEL: vfcopynsign_vf_nxv8f64:
1893 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1894 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
1896 %head = insertelement <vscale x 8 x double> poison, double %s, i32 0
1897 %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
1898 %n = fneg <vscale x 8 x double> %splat
1899 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %n)
1900 ret <vscale x 8 x double> %r
1903 define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, <vscale x 8 x half> %vs) {
1904 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f16:
1906 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1907 ; CHECK-NEXT: vfwcvt.f.f.v v20, v16
1908 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1909 ; CHECK-NEXT: vfwcvt.f.f.v v24, v20
1910 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
1911 ; CHECK-NEXT: vfsgnj.vv v8, v8, v24
1913 %e = fpext <vscale x 8 x half> %vs to <vscale x 8 x double>
1914 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %e)
1915 ret <vscale x 8 x double> %r
1918 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
1919 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16:
1921 ; CHECK-NEXT: fcvt.d.h fa5, fa0
1922 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1923 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1925 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
1926 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1927 %esplat = fpext <vscale x 8 x half> %splat to <vscale x 8 x double>
1928 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %esplat)
1929 ret <vscale x 8 x double> %r
1932 define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, <vscale x 8 x half> %vs) {
1933 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16:
1935 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1936 ; CHECK-NEXT: vfwcvt.f.f.v v20, v16
1937 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
1938 ; CHECK-NEXT: vfwcvt.f.f.v v24, v20
1939 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
1940 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24
1942 %n = fneg <vscale x 8 x half> %vs
1943 %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x double>
1944 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
1945 ret <vscale x 8 x double> %r
1948 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
1949 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16:
1951 ; CHECK-NEXT: fcvt.d.h fa5, fa0
1952 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1953 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
1955 %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
1956 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1957 %n = fneg <vscale x 8 x half> %splat
1958 %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x double>
1959 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
1960 ret <vscale x 8 x double> %r
1963 define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, <vscale x 8 x float> %vs) {
1964 ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f32:
1966 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1967 ; CHECK-NEXT: vfwcvt.f.f.v v24, v16
1968 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
1969 ; CHECK-NEXT: vfsgnj.vv v8, v8, v24
1971 %e = fpext <vscale x 8 x float> %vs to <vscale x 8 x double>
1972 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %e)
1973 ret <vscale x 8 x double> %r
1976 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
1977 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32:
1979 ; CHECK-NEXT: fcvt.d.s fa5, fa0
1980 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1981 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa5
1983 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
1984 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1985 %esplat = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
1986 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %esplat)
1987 ret <vscale x 8 x double> %r
1990 define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, <vscale x 8 x float> %vs) {
1991 ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32:
1993 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1994 ; CHECK-NEXT: vfwcvt.f.f.v v24, v16
1995 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
1996 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24
1998 %n = fneg <vscale x 8 x float> %vs
1999 %eneg = fpext <vscale x 8 x float> %n to <vscale x 8 x double>
2000 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
2001 ret <vscale x 8 x double> %r
2004 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
2005 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32:
2007 ; CHECK-NEXT: fcvt.d.s fa5, fa0
2008 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2009 ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa5
2011 %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
2012 %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
2013 %n = fneg <vscale x 8 x float> %splat
2014 %eneg = fpext <vscale x 8 x float> %n to <vscale x 8 x double>
2015 %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
2016 ret <vscale x 8 x double> %r
2019 define <vscale x 2 x float> @fptrunc_of_copysign_nxv2f32_nxv2f64(<vscale x 2 x double> %X, <vscale x 2 x double> %Y) {
2020 ; CHECK-LABEL: fptrunc_of_copysign_nxv2f32_nxv2f64:
2022 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
2023 ; CHECK-NEXT: vfsgnj.vv v10, v8, v10
2024 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
2025 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
2027 %copy = call fast <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %X, <vscale x 2 x double> %Y)
2028 %trunc = fptrunc <vscale x 2 x double> %copy to <vscale x 2 x float>
2029 ret <vscale x 2 x float> %trunc