1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+f,+d -target-abi=ilp32d \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s
4 ; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+f,+d -target-abi=lp64d \
5 ; RUN: -verify-machineinstrs < %s | FileCheck %s

7 define <2 x float> @vfwmul_v2f16(ptr %x, ptr %y) {
8 ; CHECK-LABEL: vfwmul_v2f16:
; CHECK:       # %bb.0:
10 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
11 ; CHECK-NEXT: vle16.v v9, (a0)
12 ; CHECK-NEXT: vle16.v v10, (a1)
13 ; CHECK-NEXT: vfwmul.vv v8, v9, v10
; CHECK-NEXT:    ret
15 %a = load <2 x half>, ptr %x
16 %b = load <2 x half>, ptr %y
17 %c = fpext <2 x half> %a to <2 x float>
18 %d = fpext <2 x half> %b to <2 x float>
19 %e = fmul <2 x float> %c, %d
  ret <2 x float> %e
}

23 define <4 x float> @vfwmul_v4f16(ptr %x, ptr %y) {
24 ; CHECK-LABEL: vfwmul_v4f16:
; CHECK:       # %bb.0:
26 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
27 ; CHECK-NEXT: vle16.v v9, (a0)
28 ; CHECK-NEXT: vle16.v v10, (a1)
29 ; CHECK-NEXT: vfwmul.vv v8, v9, v10
; CHECK-NEXT:    ret
31 %a = load <4 x half>, ptr %x
32 %b = load <4 x half>, ptr %y
33 %c = fpext <4 x half> %a to <4 x float>
34 %d = fpext <4 x half> %b to <4 x float>
35 %e = fmul <4 x float> %c, %d
  ret <4 x float> %e
}

39 define <8 x float> @vfwmul_v8f16(ptr %x, ptr %y) {
40 ; CHECK-LABEL: vfwmul_v8f16:
; CHECK:       # %bb.0:
42 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
43 ; CHECK-NEXT: vle16.v v10, (a0)
44 ; CHECK-NEXT: vle16.v v11, (a1)
45 ; CHECK-NEXT: vfwmul.vv v8, v10, v11
; CHECK-NEXT:    ret
47 %a = load <8 x half>, ptr %x
48 %b = load <8 x half>, ptr %y
49 %c = fpext <8 x half> %a to <8 x float>
50 %d = fpext <8 x half> %b to <8 x float>
51 %e = fmul <8 x float> %c, %d
  ret <8 x float> %e
}

55 define <16 x float> @vfwmul_v16f16(ptr %x, ptr %y) {
56 ; CHECK-LABEL: vfwmul_v16f16:
; CHECK:       # %bb.0:
58 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
59 ; CHECK-NEXT: vle16.v v12, (a0)
60 ; CHECK-NEXT: vle16.v v14, (a1)
61 ; CHECK-NEXT: vfwmul.vv v8, v12, v14
; CHECK-NEXT:    ret
63 %a = load <16 x half>, ptr %x
64 %b = load <16 x half>, ptr %y
65 %c = fpext <16 x half> %a to <16 x float>
66 %d = fpext <16 x half> %b to <16 x float>
67 %e = fmul <16 x float> %c, %d
  ret <16 x float> %e
}

71 define <32 x float> @vfwmul_v32f16(ptr %x, ptr %y) {
72 ; CHECK-LABEL: vfwmul_v32f16:
; CHECK:       # %bb.0:
74 ; CHECK-NEXT: li a2, 32
75 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
76 ; CHECK-NEXT: vle16.v v16, (a0)
77 ; CHECK-NEXT: vle16.v v20, (a1)
78 ; CHECK-NEXT: vfwmul.vv v8, v16, v20
; CHECK-NEXT:    ret
80 %a = load <32 x half>, ptr %x
81 %b = load <32 x half>, ptr %y
82 %c = fpext <32 x half> %a to <32 x float>
83 %d = fpext <32 x half> %b to <32 x float>
84 %e = fmul <32 x float> %c, %d
  ret <32 x float> %e
}

88 define <64 x float> @vfwmul_v64f16(ptr %x, ptr %y) {
89 ; CHECK-LABEL: vfwmul_v64f16:
; CHECK:       # %bb.0:
91 ; CHECK-NEXT: addi sp, sp, -16
92 ; CHECK-NEXT: .cfi_def_cfa_offset 16
93 ; CHECK-NEXT: csrr a2, vlenb
94 ; CHECK-NEXT: slli a2, a2, 4
95 ; CHECK-NEXT: sub sp, sp, a2
96 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
97 ; CHECK-NEXT: li a2, 64
98 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
99 ; CHECK-NEXT: vle16.v v8, (a0)
100 ; CHECK-NEXT: addi a0, sp, 16
101 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
102 ; CHECK-NEXT: vle16.v v0, (a1)
103 ; CHECK-NEXT: li a0, 32
104 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
105 ; CHECK-NEXT: vslidedown.vx v16, v8, a0
106 ; CHECK-NEXT: vslidedown.vx v8, v0, a0
107 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
108 ; CHECK-NEXT: vfwmul.vv v24, v16, v8
109 ; CHECK-NEXT: csrr a0, vlenb
110 ; CHECK-NEXT: slli a0, a0, 3
111 ; CHECK-NEXT: add a0, sp, a0
112 ; CHECK-NEXT: addi a0, a0, 16
113 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
114 ; CHECK-NEXT: addi a0, sp, 16
115 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
116 ; CHECK-NEXT: vfwmul.vv v8, v16, v0
117 ; CHECK-NEXT: csrr a0, vlenb
118 ; CHECK-NEXT: slli a0, a0, 3
119 ; CHECK-NEXT: add a0, sp, a0
120 ; CHECK-NEXT: addi a0, a0, 16
121 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
122 ; CHECK-NEXT: csrr a0, vlenb
123 ; CHECK-NEXT: slli a0, a0, 4
124 ; CHECK-NEXT: add sp, sp, a0
125 ; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT:    ret
127 %a = load <64 x half>, ptr %x
128 %b = load <64 x half>, ptr %y
129 %c = fpext <64 x half> %a to <64 x float>
130 %d = fpext <64 x half> %b to <64 x float>
131 %e = fmul <64 x float> %c, %d
  ret <64 x float> %e
}

135 define <2 x double> @vfwmul_v2f32(ptr %x, ptr %y) {
136 ; CHECK-LABEL: vfwmul_v2f32:
; CHECK:       # %bb.0:
138 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
139 ; CHECK-NEXT: vle32.v v9, (a0)
140 ; CHECK-NEXT: vle32.v v10, (a1)
141 ; CHECK-NEXT: vfwmul.vv v8, v9, v10
; CHECK-NEXT:    ret
143 %a = load <2 x float>, ptr %x
144 %b = load <2 x float>, ptr %y
145 %c = fpext <2 x float> %a to <2 x double>
146 %d = fpext <2 x float> %b to <2 x double>
147 %e = fmul <2 x double> %c, %d
  ret <2 x double> %e
}

151 define <4 x double> @vfwmul_v4f32(ptr %x, ptr %y) {
152 ; CHECK-LABEL: vfwmul_v4f32:
; CHECK:       # %bb.0:
154 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
155 ; CHECK-NEXT: vle32.v v10, (a0)
156 ; CHECK-NEXT: vle32.v v11, (a1)
157 ; CHECK-NEXT: vfwmul.vv v8, v10, v11
; CHECK-NEXT:    ret
159 %a = load <4 x float>, ptr %x
160 %b = load <4 x float>, ptr %y
161 %c = fpext <4 x float> %a to <4 x double>
162 %d = fpext <4 x float> %b to <4 x double>
163 %e = fmul <4 x double> %c, %d
  ret <4 x double> %e
}

167 define <8 x double> @vfwmul_v8f32(ptr %x, ptr %y) {
168 ; CHECK-LABEL: vfwmul_v8f32:
; CHECK:       # %bb.0:
170 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
171 ; CHECK-NEXT: vle32.v v12, (a0)
172 ; CHECK-NEXT: vle32.v v14, (a1)
173 ; CHECK-NEXT: vfwmul.vv v8, v12, v14
; CHECK-NEXT:    ret
175 %a = load <8 x float>, ptr %x
176 %b = load <8 x float>, ptr %y
177 %c = fpext <8 x float> %a to <8 x double>
178 %d = fpext <8 x float> %b to <8 x double>
179 %e = fmul <8 x double> %c, %d
  ret <8 x double> %e
}

183 define <16 x double> @vfwmul_v16f32(ptr %x, ptr %y) {
184 ; CHECK-LABEL: vfwmul_v16f32:
; CHECK:       # %bb.0:
186 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
187 ; CHECK-NEXT: vle32.v v16, (a0)
188 ; CHECK-NEXT: vle32.v v20, (a1)
189 ; CHECK-NEXT: vfwmul.vv v8, v16, v20
; CHECK-NEXT:    ret
191 %a = load <16 x float>, ptr %x
192 %b = load <16 x float>, ptr %y
193 %c = fpext <16 x float> %a to <16 x double>
194 %d = fpext <16 x float> %b to <16 x double>
195 %e = fmul <16 x double> %c, %d
  ret <16 x double> %e
}

199 define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
200 ; CHECK-LABEL: vfwmul_v32f32:
; CHECK:       # %bb.0:
202 ; CHECK-NEXT: addi sp, sp, -16
203 ; CHECK-NEXT: .cfi_def_cfa_offset 16
204 ; CHECK-NEXT: csrr a2, vlenb
205 ; CHECK-NEXT: slli a2, a2, 4
206 ; CHECK-NEXT: sub sp, sp, a2
207 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
208 ; CHECK-NEXT: li a2, 32
209 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
210 ; CHECK-NEXT: vle32.v v8, (a0)
211 ; CHECK-NEXT: addi a0, sp, 16
212 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
213 ; CHECK-NEXT: vle32.v v0, (a1)
214 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
215 ; CHECK-NEXT: vslidedown.vi v16, v8, 16
216 ; CHECK-NEXT: vslidedown.vi v8, v0, 16
217 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
218 ; CHECK-NEXT: vfwmul.vv v24, v16, v8
219 ; CHECK-NEXT: csrr a0, vlenb
220 ; CHECK-NEXT: slli a0, a0, 3
221 ; CHECK-NEXT: add a0, sp, a0
222 ; CHECK-NEXT: addi a0, a0, 16
223 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
224 ; CHECK-NEXT: addi a0, sp, 16
225 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
226 ; CHECK-NEXT: vfwmul.vv v8, v16, v0
227 ; CHECK-NEXT: csrr a0, vlenb
228 ; CHECK-NEXT: slli a0, a0, 3
229 ; CHECK-NEXT: add a0, sp, a0
230 ; CHECK-NEXT: addi a0, a0, 16
231 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
232 ; CHECK-NEXT: csrr a0, vlenb
233 ; CHECK-NEXT: slli a0, a0, 4
234 ; CHECK-NEXT: add sp, sp, a0
235 ; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT:    ret
237 %a = load <32 x float>, ptr %x
238 %b = load <32 x float>, ptr %y
239 %c = fpext <32 x float> %a to <32 x double>
240 %d = fpext <32 x float> %b to <32 x double>
241 %e = fmul <32 x double> %c, %d
  ret <32 x double> %e
}

245 define <2 x float> @vfwmul_vf_v2f16(ptr %x, half %y) {
246 ; CHECK-LABEL: vfwmul_vf_v2f16:
; CHECK:       # %bb.0:
248 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
249 ; CHECK-NEXT: vle16.v v9, (a0)
250 ; CHECK-NEXT: vfwmul.vf v8, v9, fa0
; CHECK-NEXT:    ret
252 %a = load <2 x half>, ptr %x
253 %b = insertelement <2 x half> poison, half %y, i32 0
254 %c = shufflevector <2 x half> %b, <2 x half> poison, <2 x i32> zeroinitializer
255 %d = fpext <2 x half> %a to <2 x float>
256 %e = fpext <2 x half> %c to <2 x float>
257 %f = fmul <2 x float> %d, %e
  ret <2 x float> %f
}

261 define <4 x float> @vfwmul_vf_v4f16(ptr %x, half %y) {
262 ; CHECK-LABEL: vfwmul_vf_v4f16:
; CHECK:       # %bb.0:
264 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
265 ; CHECK-NEXT: vle16.v v9, (a0)
266 ; CHECK-NEXT: vfwmul.vf v8, v9, fa0
; CHECK-NEXT:    ret
268 %a = load <4 x half>, ptr %x
269 %b = insertelement <4 x half> poison, half %y, i32 0
270 %c = shufflevector <4 x half> %b, <4 x half> poison, <4 x i32> zeroinitializer
271 %d = fpext <4 x half> %a to <4 x float>
272 %e = fpext <4 x half> %c to <4 x float>
273 %f = fmul <4 x float> %d, %e
  ret <4 x float> %f
}

277 define <8 x float> @vfwmul_vf_v8f16(ptr %x, half %y) {
278 ; CHECK-LABEL: vfwmul_vf_v8f16:
; CHECK:       # %bb.0:
280 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
281 ; CHECK-NEXT: vle16.v v10, (a0)
282 ; CHECK-NEXT: vfwmul.vf v8, v10, fa0
; CHECK-NEXT:    ret
284 %a = load <8 x half>, ptr %x
285 %b = insertelement <8 x half> poison, half %y, i32 0
286 %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
287 %d = fpext <8 x half> %a to <8 x float>
288 %e = fpext <8 x half> %c to <8 x float>
289 %f = fmul <8 x float> %d, %e
  ret <8 x float> %f
}

293 define <16 x float> @vfwmul_vf_v16f16(ptr %x, half %y) {
294 ; CHECK-LABEL: vfwmul_vf_v16f16:
; CHECK:       # %bb.0:
296 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
297 ; CHECK-NEXT: vle16.v v12, (a0)
298 ; CHECK-NEXT: vfwmul.vf v8, v12, fa0
; CHECK-NEXT:    ret
300 %a = load <16 x half>, ptr %x
301 %b = insertelement <16 x half> poison, half %y, i32 0
302 %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
303 %d = fpext <16 x half> %a to <16 x float>
304 %e = fpext <16 x half> %c to <16 x float>
305 %f = fmul <16 x float> %d, %e
  ret <16 x float> %f
}

309 define <32 x float> @vfwmul_vf_v32f16(ptr %x, half %y) {
310 ; CHECK-LABEL: vfwmul_vf_v32f16:
; CHECK:       # %bb.0:
312 ; CHECK-NEXT: li a1, 32
313 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
314 ; CHECK-NEXT: vle16.v v16, (a0)
315 ; CHECK-NEXT: vfwmul.vf v8, v16, fa0
; CHECK-NEXT:    ret
317 %a = load <32 x half>, ptr %x
318 %b = insertelement <32 x half> poison, half %y, i32 0
319 %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
320 %d = fpext <32 x half> %a to <32 x float>
321 %e = fpext <32 x half> %c to <32 x float>
322 %f = fmul <32 x float> %d, %e
  ret <32 x float> %f
}

326 define <2 x double> @vfwmul_vf_v2f32(ptr %x, float %y) {
327 ; CHECK-LABEL: vfwmul_vf_v2f32:
; CHECK:       # %bb.0:
329 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
330 ; CHECK-NEXT: vle32.v v9, (a0)
331 ; CHECK-NEXT: vfwmul.vf v8, v9, fa0
; CHECK-NEXT:    ret
333 %a = load <2 x float>, ptr %x
334 %b = insertelement <2 x float> poison, float %y, i32 0
335 %c = shufflevector <2 x float> %b, <2 x float> poison, <2 x i32> zeroinitializer
336 %d = fpext <2 x float> %a to <2 x double>
337 %e = fpext <2 x float> %c to <2 x double>
338 %f = fmul <2 x double> %d, %e
  ret <2 x double> %f
}

342 define <4 x double> @vfwmul_vf_v4f32(ptr %x, float %y) {
343 ; CHECK-LABEL: vfwmul_vf_v4f32:
; CHECK:       # %bb.0:
345 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
346 ; CHECK-NEXT: vle32.v v10, (a0)
347 ; CHECK-NEXT: vfwmul.vf v8, v10, fa0
; CHECK-NEXT:    ret
349 %a = load <4 x float>, ptr %x
350 %b = insertelement <4 x float> poison, float %y, i32 0
351 %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
352 %d = fpext <4 x float> %a to <4 x double>
353 %e = fpext <4 x float> %c to <4 x double>
354 %f = fmul <4 x double> %d, %e
  ret <4 x double> %f
}

358 define <8 x double> @vfwmul_vf_v8f32(ptr %x, float %y) {
359 ; CHECK-LABEL: vfwmul_vf_v8f32:
; CHECK:       # %bb.0:
361 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
362 ; CHECK-NEXT: vle32.v v12, (a0)
363 ; CHECK-NEXT: vfwmul.vf v8, v12, fa0
; CHECK-NEXT:    ret
365 %a = load <8 x float>, ptr %x
366 %b = insertelement <8 x float> poison, float %y, i32 0
367 %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
368 %d = fpext <8 x float> %a to <8 x double>
369 %e = fpext <8 x float> %c to <8 x double>
370 %f = fmul <8 x double> %d, %e
  ret <8 x double> %f
}

374 define <16 x double> @vfwmul_vf_v16f32(ptr %x, float %y) {
375 ; CHECK-LABEL: vfwmul_vf_v16f32:
; CHECK:       # %bb.0:
377 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
378 ; CHECK-NEXT: vle32.v v16, (a0)
379 ; CHECK-NEXT: vfwmul.vf v8, v16, fa0
; CHECK-NEXT:    ret
381 %a = load <16 x float>, ptr %x
382 %b = insertelement <16 x float> poison, float %y, i32 0
383 %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
384 %d = fpext <16 x float> %a to <16 x double>
385 %e = fpext <16 x float> %c to <16 x double>
386 %f = fmul <16 x double> %d, %e
  ret <16 x double> %f
}

390 define <32 x double> @vfwmul_vf_v32f32(ptr %x, float %y) {
391 ; CHECK-LABEL: vfwmul_vf_v32f32:
; CHECK:       # %bb.0:
393 ; CHECK-NEXT: li a1, 32
394 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
395 ; CHECK-NEXT: vle32.v v24, (a0)
396 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
397 ; CHECK-NEXT: vslidedown.vi v8, v24, 16
398 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
399 ; CHECK-NEXT: vfwmul.vf v16, v8, fa0
400 ; CHECK-NEXT: vfwmul.vf v8, v24, fa0
; CHECK-NEXT:    ret
402 %a = load <32 x float>, ptr %x
403 %b = insertelement <32 x float> poison, float %y, i32 0
404 %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
405 %d = fpext <32 x float> %a to <32 x double>
406 %e = fpext <32 x float> %c to <32 x double>
407 %f = fmul <32 x double> %d, %e
  ret <32 x double> %f
}

411 define <2 x float> @vfwmul_squared_v2f16_v2f32(ptr %x) {
412 ; CHECK-LABEL: vfwmul_squared_v2f16_v2f32:
; CHECK:       # %bb.0:
414 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
415 ; CHECK-NEXT: vle16.v v9, (a0)
416 ; CHECK-NEXT: vfwmul.vv v8, v9, v9
; CHECK-NEXT:    ret
418 %a = load <2 x half>, ptr %x
419 %b = fpext <2 x half> %a to <2 x float>
420 %c = fmul <2 x float> %b, %b
  ret <2 x float> %c
}

424 define <2 x double> @vfwmul_squared_v2f32_v2f64(ptr %x) {
425 ; CHECK-LABEL: vfwmul_squared_v2f32_v2f64:
; CHECK:       # %bb.0:
427 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
428 ; CHECK-NEXT: vle32.v v9, (a0)
429 ; CHECK-NEXT: vfwmul.vv v8, v9, v9
; CHECK-NEXT:    ret
431 %a = load <2 x float>, ptr %x
432 %b = fpext <2 x float> %a to <2 x double>
433 %c = fmul <2 x double> %b, %b
  ret <2 x double> %c
}

437 define <2 x double> @vfwmul_squared_v2f16_v2f64(ptr %x) {
438 ; CHECK-LABEL: vfwmul_squared_v2f16_v2f64:
; CHECK:       # %bb.0:
440 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
441 ; CHECK-NEXT: vle16.v v8, (a0)
442 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8
443 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
444 ; CHECK-NEXT: vfwmul.vv v8, v9, v9
; CHECK-NEXT:    ret
446 %a = load <2 x half>, ptr %x
447 %b = fpext <2 x half> %a to <2 x double>
448 %c = fmul <2 x double> %b, %b
  ret <2 x double> %c
}

452 define <2 x float> @vfwmul_vf2_v2f32(<2 x half> %x, half %y) {
453 ; CHECK-LABEL: vfwmul_vf2_v2f32:
; CHECK:       # %bb.0:
455 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
456 ; CHECK-NEXT: vfwmul.vf v9, v8, fa0
457 ; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT:    ret
459 %a = fpext <2 x half> %x to <2 x float>
460 %b = fpext half %y to float
461 %c = insertelement <2 x float> poison, float %b, i32 0
462 %d = shufflevector <2 x float> %c, <2 x float> poison, <2 x i32> zeroinitializer
463 %e = fmul <2 x float> %a, %d
  ret <2 x float> %e
}