; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
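
; Codegen for the constrained (strictfp) sqrt intrinsic on scalable vectors:
; f16/f32/f64 elements at each LMUL should lower to a single vfsqrt.v.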

declare <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half>, metadata, metadata)

define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %r
}

declare <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half>, metadata, metadata)

define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %r
}

declare <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half>, metadata, metadata)

define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %r
}

declare <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half>, metadata, metadata)

define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %r
}

declare <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half>, metadata, metadata)

define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %r
}

declare <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half>, metadata, metadata)

define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %r
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float>, metadata, metadata)

define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %r
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float>, metadata, metadata)

define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %r
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float>, metadata, metadata)

define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %r
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float>, metadata, metadata)

define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %r
}

declare <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float>, metadata, metadata)

define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %r
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double>, metadata, metadata)

define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %r
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double>, metadata, metadata)

define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %r
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double>, metadata, metadata)

define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %r
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double>, metadata, metadata)

define <vscale x 8 x double> @vfsqrt_nxv8f64(<vscale x 8 x double> %v) strictfp {
; CHECK-LABEL: vfsqrt_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %r
}