; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
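
; This file tests lowering of the @llvm.vp.copysign.* intrinsics on
; fixed-length vectors: each call is expected to select a single
; vfsgnj.vv, with the EVL argument becoming the AVL of the preceding
; vsetvli.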

declare <2 x half> @llvm.vp.copysign.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)

define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}
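
; The unmasked forms pass an all-true mask splat, which selects the
; unmasked vfsgnj.vv encoding (no v0.t suffix).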
define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.copysign.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)

define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}

define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.copysign.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)

define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}

define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.copysign.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)

define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}

define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x half> %v
}

declare <2 x float> @llvm.vp.copysign.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)

define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}

define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.copysign.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}

define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)

define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}

define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.copysign.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)

define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}

define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x float> %v
}

declare <2 x double> @llvm.vp.copysign.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)

define <2 x double> @vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}

define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.copysign.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)

define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}

define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.copysign.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)

define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}

define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x double> %v
}
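
; v15f64 exercises a non-power-of-two element count; at the minimum VLEN
; of 128 it still fits in a single m8 register group, so no splitting is
; needed.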
declare <15 x double> @llvm.vp.copysign.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32)

define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v15f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}

define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v15f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x double> %v
}

declare <16 x double> @llvm.vp.copysign.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)

define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}

define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x double> %v
}
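
; v32f64 does not fit in one m8 register group at the minimum VLEN, so
; the operation is split into two 16-element halves: the first half runs
; with AVL = min(evl, 16) and the second with AVL = max(evl - 16, 0),
; the latter computed branchlessly by the sltu/addi/and sequence below.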
declare <32 x double> @llvm.vp.copysign.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)

define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vle64.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v7, v0, 2
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v24, v0.t
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v16, v16, v24, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}
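
; Without a mask, v0 is free to hold one half of the second operand, so
; the unmasked version needs no stack spills.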
define <32 x double> @vfsgnj_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v32f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a1)
; CHECK-NEXT:    vle64.v v0, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v0
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v16, v16, v24
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x double> %v
}