1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s | FileCheck %s
4 target triple = "aarch64-unknown-linux-gnu"
6 define <vscale x 8 x half> @fadd_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
7 ; CHECK-LABEL: fadd_h_immhalf:
9 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, #0.5
11 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
12 <vscale x 8 x half> %a,
13 <vscale x 8 x half> splat(half 0.500000e+00))
14 ret <vscale x 8 x half> %out
18 define <vscale x 8 x half> @fadd_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
19 ; CHECK-LABEL: fadd_h_immhalf_zero:
21 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
22 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, #0.5
24 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
25 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
26 <vscale x 8 x half> %a_z,
27 <vscale x 8 x half> splat(half 0.500000e+00))
28 ret <vscale x 8 x half> %out
31 define <vscale x 8 x half> @fadd_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
32 ; CHECK-LABEL: fadd_h_immone:
34 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, #1.0
36 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
37 <vscale x 8 x half> %a,
38 <vscale x 8 x half> splat(half 1.000000e+00))
39 ret <vscale x 8 x half> %out
42 define <vscale x 8 x half> @fadd_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
43 ; CHECK-LABEL: fadd_h_immone_zero:
45 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
46 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, #1.0
48 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
49 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
50 <vscale x 8 x half> %a_z,
51 <vscale x 8 x half> splat(half 1.000000e+00))
52 ret <vscale x 8 x half> %out
55 define <vscale x 4 x float> @fadd_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
56 ; CHECK-LABEL: fadd_s_immhalf:
58 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, #0.5
60 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
61 <vscale x 4 x float> %a,
62 <vscale x 4 x float> splat(float 0.500000e+00))
63 ret <vscale x 4 x float> %out
66 define <vscale x 4 x float> @fadd_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
67 ; CHECK-LABEL: fadd_s_immhalf_zero:
69 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
70 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, #0.5
72 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
73 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
74 <vscale x 4 x float> %a_z,
75 <vscale x 4 x float> splat(float 0.500000e+00))
76 ret <vscale x 4 x float> %out
79 define <vscale x 4 x float> @fadd_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
80 ; CHECK-LABEL: fadd_s_immone:
82 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, #1.0
84 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
85 <vscale x 4 x float> %a,
86 <vscale x 4 x float> splat(float 1.000000e+00))
87 ret <vscale x 4 x float> %out
90 define <vscale x 4 x float> @fadd_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
91 ; CHECK-LABEL: fadd_s_immone_zero:
93 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
94 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, #1.0
96 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
97 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
98 <vscale x 4 x float> %a_z,
99 <vscale x 4 x float> splat(float 1.000000e+00))
100 ret <vscale x 4 x float> %out
103 define <vscale x 2 x double> @fadd_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
104 ; CHECK-LABEL: fadd_d_immhalf:
106 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, #0.5
108 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
109 <vscale x 2 x double> %a,
110 <vscale x 2 x double> splat(double 0.500000e+00))
111 ret <vscale x 2 x double> %out
114 define <vscale x 2 x double> @fadd_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
115 ; CHECK-LABEL: fadd_d_immhalf_zero:
117 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
118 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, #0.5
120 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
121 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
122 <vscale x 2 x double> %a_z,
123 <vscale x 2 x double> splat(double 0.500000e+00))
124 ret <vscale x 2 x double> %out
127 define <vscale x 2 x double> @fadd_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
128 ; CHECK-LABEL: fadd_d_immone:
130 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, #1.0
132 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
133 <vscale x 2 x double> %a,
134 <vscale x 2 x double> splat(double 1.000000e+00))
135 ret <vscale x 2 x double> %out
138 define <vscale x 2 x double> @fadd_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
139 ; CHECK-LABEL: fadd_d_immone_zero:
141 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
142 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, #1.0
144 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
145 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
146 <vscale x 2 x double> %a_z,
147 <vscale x 2 x double> splat(double 1.000000e+00))
148 ret <vscale x 2 x double> %out
; ===== FMAX (predicated, immediate): @llvm.aarch64.sve.fmax with a splat of
; 0.0 (written as zeroinitializer) or 1.0 — the two immediates SVE FMAX
; (immediate) encodes. Same plain-vs-"_zero" (movprfx-zeroing) structure as
; the fadd tests above.
151 define <vscale x 8 x half> @fmax_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
152 ; CHECK-LABEL: fmax_h_immzero:
154 ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, #0.0
156 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
157 <vscale x 8 x half> %a,
158 <vscale x 8 x half> zeroinitializer)
159 ret <vscale x 8 x half> %out
162 define <vscale x 8 x half> @fmax_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
163 ; CHECK-LABEL: fmax_h_immzero_zero:
165 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
166 ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, #0.0
168 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
169 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
170 <vscale x 8 x half> %a_z,
171 <vscale x 8 x half> zeroinitializer)
172 ret <vscale x 8 x half> %out
175 define <vscale x 8 x half> @fmax_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
176 ; CHECK-LABEL: fmax_h_immone:
178 ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, #1.0
180 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
181 <vscale x 8 x half> %a,
182 <vscale x 8 x half> splat(half 1.000000e+00))
183 ret <vscale x 8 x half> %out
186 define <vscale x 8 x half> @fmax_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
187 ; CHECK-LABEL: fmax_h_immone_zero:
189 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
190 ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, #1.0
192 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
193 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
194 <vscale x 8 x half> %a_z,
195 <vscale x 8 x half> splat(half 1.000000e+00))
196 ret <vscale x 8 x half> %out
199 define <vscale x 4 x float> @fmax_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
200 ; CHECK-LABEL: fmax_s_immzero:
202 ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, #0.0
204 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
205 <vscale x 4 x float> %a,
206 <vscale x 4 x float> zeroinitializer)
207 ret <vscale x 4 x float> %out
210 define <vscale x 4 x float> @fmax_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
211 ; CHECK-LABEL: fmax_s_immzero_zero:
213 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
214 ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, #0.0
216 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
217 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
218 <vscale x 4 x float> %a_z,
219 <vscale x 4 x float> zeroinitializer)
220 ret <vscale x 4 x float> %out
223 define <vscale x 4 x float> @fmax_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
224 ; CHECK-LABEL: fmax_s_immone:
226 ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, #1.0
228 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
229 <vscale x 4 x float> %a,
230 <vscale x 4 x float> splat(float 1.000000e+00))
231 ret <vscale x 4 x float> %out
234 define <vscale x 4 x float> @fmax_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
235 ; CHECK-LABEL: fmax_s_immone_zero:
237 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
238 ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, #1.0
240 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
241 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
242 <vscale x 4 x float> %a_z,
243 <vscale x 4 x float> splat(float 1.000000e+00))
244 ret <vscale x 4 x float> %out
247 define <vscale x 2 x double> @fmax_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
248 ; CHECK-LABEL: fmax_d_immzero:
250 ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, #0.0
252 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
253 <vscale x 2 x double> %a,
254 <vscale x 2 x double> zeroinitializer)
255 ret <vscale x 2 x double> %out
258 define <vscale x 2 x double> @fmax_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
259 ; CHECK-LABEL: fmax_d_immzero_zero:
261 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
262 ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, #0.0
264 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
265 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
266 <vscale x 2 x double> %a_z,
267 <vscale x 2 x double> zeroinitializer)
268 ret <vscale x 2 x double> %out
271 define <vscale x 2 x double> @fmax_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
272 ; CHECK-LABEL: fmax_d_immone:
274 ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, #1.0
276 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
277 <vscale x 2 x double> %a,
278 <vscale x 2 x double> splat(double 1.000000e+00))
279 ret <vscale x 2 x double> %out
282 define <vscale x 2 x double> @fmax_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
283 ; CHECK-LABEL: fmax_d_immone_zero:
285 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
286 ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, #1.0
288 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
289 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
290 <vscale x 2 x double> %a_z,
291 <vscale x 2 x double> splat(double 1.000000e+00))
292 ret <vscale x 2 x double> %out
; ===== FMAXNM (predicated, immediate): NaN-propagation-aware maximum,
; @llvm.aarch64.sve.fmaxnm, with immediate 0.0 or 1.0. Same plain-vs-"_zero"
; (movprfx-zeroing) structure as the families above.
295 define <vscale x 8 x half> @fmaxnm_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
296 ; CHECK-LABEL: fmaxnm_h_immzero:
298 ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, #0.0
300 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
301 <vscale x 8 x half> %a,
302 <vscale x 8 x half> zeroinitializer)
303 ret <vscale x 8 x half> %out
306 define <vscale x 8 x half> @fmaxnm_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
307 ; CHECK-LABEL: fmaxnm_h_immzero_zero:
309 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
310 ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, #0.0
312 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
313 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
314 <vscale x 8 x half> %a_z,
315 <vscale x 8 x half> zeroinitializer)
316 ret <vscale x 8 x half> %out
319 define <vscale x 8 x half> @fmaxnm_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
320 ; CHECK-LABEL: fmaxnm_h_immone:
322 ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, #1.0
324 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
325 <vscale x 8 x half> %a,
326 <vscale x 8 x half> splat(half 1.000000e+00))
327 ret <vscale x 8 x half> %out
330 define <vscale x 8 x half> @fmaxnm_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
331 ; CHECK-LABEL: fmaxnm_h_immone_zero:
333 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
334 ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, #1.0
336 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
337 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
338 <vscale x 8 x half> %a_z,
339 <vscale x 8 x half> splat(half 1.000000e+00))
340 ret <vscale x 8 x half> %out
343 define <vscale x 4 x float> @fmaxnm_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
344 ; CHECK-LABEL: fmaxnm_s_immzero:
346 ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, #0.0
348 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
349 <vscale x 4 x float> %a,
350 <vscale x 4 x float> zeroinitializer)
351 ret <vscale x 4 x float> %out
354 define <vscale x 4 x float> @fmaxnm_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
355 ; CHECK-LABEL: fmaxnm_s_immzero_zero:
357 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
358 ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, #0.0
360 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
361 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
362 <vscale x 4 x float> %a_z,
363 <vscale x 4 x float> zeroinitializer)
364 ret <vscale x 4 x float> %out
367 define <vscale x 4 x float> @fmaxnm_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
368 ; CHECK-LABEL: fmaxnm_s_immone:
370 ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, #1.0
372 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
373 <vscale x 4 x float> %a,
374 <vscale x 4 x float> splat(float 1.000000e+00))
375 ret <vscale x 4 x float> %out
378 define <vscale x 4 x float> @fmaxnm_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
379 ; CHECK-LABEL: fmaxnm_s_immone_zero:
381 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
382 ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, #1.0
384 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
385 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
386 <vscale x 4 x float> %a_z,
387 <vscale x 4 x float> splat(float 1.000000e+00))
388 ret <vscale x 4 x float> %out
391 define <vscale x 2 x double> @fmaxnm_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
392 ; CHECK-LABEL: fmaxnm_d_immzero:
394 ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, #0.0
396 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
397 <vscale x 2 x double> %a,
398 <vscale x 2 x double> zeroinitializer)
399 ret <vscale x 2 x double> %out
402 define <vscale x 2 x double> @fmaxnm_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
403 ; CHECK-LABEL: fmaxnm_d_immzero_zero:
405 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
406 ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, #0.0
408 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
409 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
410 <vscale x 2 x double> %a_z,
411 <vscale x 2 x double> zeroinitializer)
412 ret <vscale x 2 x double> %out
415 define <vscale x 2 x double> @fmaxnm_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
416 ; CHECK-LABEL: fmaxnm_d_immone:
418 ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, #1.0
420 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
421 <vscale x 2 x double> %a,
422 <vscale x 2 x double> splat(double 1.000000e+00))
423 ret <vscale x 2 x double> %out
426 define <vscale x 2 x double> @fmaxnm_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
427 ; CHECK-LABEL: fmaxnm_d_immone_zero:
429 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
430 ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, #1.0
432 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
433 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
434 <vscale x 2 x double> %a_z,
435 <vscale x 2 x double> splat(double 1.000000e+00))
436 ret <vscale x 2 x double> %out
; ===== FMIN (predicated, immediate): @llvm.aarch64.sve.fmin with immediate
; 0.0 or 1.0. Same plain-vs-"_zero" (movprfx-zeroing) structure as above.
439 define <vscale x 8 x half> @fmin_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
440 ; CHECK-LABEL: fmin_h_immzero:
442 ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, #0.0
444 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
445 <vscale x 8 x half> %a,
446 <vscale x 8 x half> zeroinitializer)
447 ret <vscale x 8 x half> %out
450 define <vscale x 8 x half> @fmin_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
451 ; CHECK-LABEL: fmin_h_immzero_zero:
453 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
454 ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, #0.0
456 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
457 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
458 <vscale x 8 x half> %a_z,
459 <vscale x 8 x half> zeroinitializer)
460 ret <vscale x 8 x half> %out
463 define <vscale x 8 x half> @fmin_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
464 ; CHECK-LABEL: fmin_h_immone:
466 ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, #1.0
468 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
469 <vscale x 8 x half> %a,
470 <vscale x 8 x half> splat(half 1.000000e+00))
471 ret <vscale x 8 x half> %out
474 define <vscale x 8 x half> @fmin_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
475 ; CHECK-LABEL: fmin_h_immone_zero:
477 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
478 ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, #1.0
480 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
481 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
482 <vscale x 8 x half> %a_z,
483 <vscale x 8 x half> splat(half 1.000000e+00))
484 ret <vscale x 8 x half> %out
487 define <vscale x 4 x float> @fmin_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
488 ; CHECK-LABEL: fmin_s_immzero:
490 ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, #0.0
492 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
493 <vscale x 4 x float> %a,
494 <vscale x 4 x float> zeroinitializer)
495 ret <vscale x 4 x float> %out
498 define <vscale x 4 x float> @fmin_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
499 ; CHECK-LABEL: fmin_s_immzero_zero:
501 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
502 ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, #0.0
504 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
505 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
506 <vscale x 4 x float> %a_z,
507 <vscale x 4 x float> zeroinitializer)
508 ret <vscale x 4 x float> %out
511 define <vscale x 4 x float> @fmin_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
512 ; CHECK-LABEL: fmin_s_immone:
514 ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, #1.0
516 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
517 <vscale x 4 x float> %a,
518 <vscale x 4 x float> splat(float 1.000000e+00))
519 ret <vscale x 4 x float> %out
522 define <vscale x 4 x float> @fmin_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
523 ; CHECK-LABEL: fmin_s_immone_zero:
525 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
526 ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, #1.0
528 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
529 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
530 <vscale x 4 x float> %a_z,
531 <vscale x 4 x float> splat(float 1.000000e+00))
532 ret <vscale x 4 x float> %out
535 define <vscale x 2 x double> @fmin_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
536 ; CHECK-LABEL: fmin_d_immzero:
538 ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, #0.0
540 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
541 <vscale x 2 x double> %a,
542 <vscale x 2 x double> zeroinitializer)
543 ret <vscale x 2 x double> %out
546 define <vscale x 2 x double> @fmin_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
547 ; CHECK-LABEL: fmin_d_immzero_zero:
549 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
550 ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, #0.0
552 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
553 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
554 <vscale x 2 x double> %a_z,
555 <vscale x 2 x double> zeroinitializer)
556 ret <vscale x 2 x double> %out
559 define <vscale x 2 x double> @fmin_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
560 ; CHECK-LABEL: fmin_d_immone:
562 ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, #1.0
564 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
565 <vscale x 2 x double> %a,
566 <vscale x 2 x double> splat(double 1.000000e+00))
567 ret <vscale x 2 x double> %out
570 define <vscale x 2 x double> @fmin_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
571 ; CHECK-LABEL: fmin_d_immone_zero:
573 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
574 ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, #1.0
576 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
577 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
578 <vscale x 2 x double> %a_z,
579 <vscale x 2 x double> splat(double 1.000000e+00))
580 ret <vscale x 2 x double> %out
; ===== FMINNM (predicated, immediate): NaN-propagation-aware minimum,
; @llvm.aarch64.sve.fminnm, with immediate 0.0 or 1.0. Same plain-vs-"_zero"
; (movprfx-zeroing) structure as above.
583 define <vscale x 8 x half> @fminnm_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
584 ; CHECK-LABEL: fminnm_h_immzero:
586 ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, #0.0
588 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
589 <vscale x 8 x half> %a,
590 <vscale x 8 x half> zeroinitializer)
591 ret <vscale x 8 x half> %out
594 define <vscale x 8 x half> @fminnm_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
595 ; CHECK-LABEL: fminnm_h_immzero_zero:
597 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
598 ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, #0.0
600 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
601 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
602 <vscale x 8 x half> %a_z,
603 <vscale x 8 x half> zeroinitializer)
604 ret <vscale x 8 x half> %out
607 define <vscale x 8 x half> @fminnm_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
608 ; CHECK-LABEL: fminnm_h_immone:
610 ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, #1.0
612 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
613 <vscale x 8 x half> %a,
614 <vscale x 8 x half> splat(half 1.000000e+00))
615 ret <vscale x 8 x half> %out
618 define <vscale x 8 x half> @fminnm_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
619 ; CHECK-LABEL: fminnm_h_immone_zero:
621 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
622 ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, #1.0
624 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
625 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
626 <vscale x 8 x half> %a_z,
627 <vscale x 8 x half> splat(half 1.000000e+00))
628 ret <vscale x 8 x half> %out
631 define <vscale x 4 x float> @fminnm_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
632 ; CHECK-LABEL: fminnm_s_immzero:
634 ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, #0.0
636 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
637 <vscale x 4 x float> %a,
638 <vscale x 4 x float> zeroinitializer)
639 ret <vscale x 4 x float> %out
642 define <vscale x 4 x float> @fminnm_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
643 ; CHECK-LABEL: fminnm_s_immzero_zero:
645 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
646 ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, #0.0
648 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
649 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
650 <vscale x 4 x float> %a_z,
651 <vscale x 4 x float> zeroinitializer)
652 ret <vscale x 4 x float> %out
655 define <vscale x 4 x float> @fminnm_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
656 ; CHECK-LABEL: fminnm_s_immone:
658 ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, #1.0
660 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
661 <vscale x 4 x float> %a,
662 <vscale x 4 x float> splat(float 1.000000e+00))
663 ret <vscale x 4 x float> %out
666 define <vscale x 4 x float> @fminnm_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
667 ; CHECK-LABEL: fminnm_s_immone_zero:
669 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
670 ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, #1.0
672 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
673 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
674 <vscale x 4 x float> %a_z,
675 <vscale x 4 x float> splat(float 1.000000e+00))
676 ret <vscale x 4 x float> %out
679 define <vscale x 2 x double> @fminnm_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
680 ; CHECK-LABEL: fminnm_d_immzero:
682 ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, #0.0
684 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
685 <vscale x 2 x double> %a,
686 <vscale x 2 x double> zeroinitializer)
687 ret <vscale x 2 x double> %out
690 define <vscale x 2 x double> @fminnm_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
691 ; CHECK-LABEL: fminnm_d_immzero_zero:
693 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
694 ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, #0.0
696 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
697 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
698 <vscale x 2 x double> %a_z,
699 <vscale x 2 x double> zeroinitializer)
700 ret <vscale x 2 x double> %out
703 define <vscale x 2 x double> @fminnm_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
704 ; CHECK-LABEL: fminnm_d_immone:
706 ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, #1.0
708 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
709 <vscale x 2 x double> %a,
710 <vscale x 2 x double> splat(double 1.000000e+00))
711 ret <vscale x 2 x double> %out
714 define <vscale x 2 x double> @fminnm_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
715 ; CHECK-LABEL: fminnm_d_immone_zero:
717 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
718 ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, #1.0
720 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
721 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
722 <vscale x 2 x double> %a_z,
723 <vscale x 2 x double> splat(double 1.000000e+00))
724 ret <vscale x 2 x double> %out
727 define <vscale x 8 x half> @fmul_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
728 ; CHECK-LABEL: fmul_h_immhalf:
730 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, #0.5
732 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
733 <vscale x 8 x half> %a,
734 <vscale x 8 x half> splat(half 0.500000e+00))
735 ret <vscale x 8 x half> %out
738 define <vscale x 8 x half> @fmul_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
739 ; CHECK-LABEL: fmul_h_immhalf_zero:
741 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
742 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, #0.5
744 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
745 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
746 <vscale x 8 x half> %a_z,
747 <vscale x 8 x half> splat(half 0.500000e+00))
748 ret <vscale x 8 x half> %out
751 define <vscale x 8 x half> @fmul_h_immtwo(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
752 ; CHECK-LABEL: fmul_h_immtwo:
754 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, #2.0
756 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
757 <vscale x 8 x half> %a,
758 <vscale x 8 x half> splat(half 2.000000e+00))
759 ret <vscale x 8 x half> %out
762 define <vscale x 8 x half> @fmul_h_immtwo_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
763 ; CHECK-LABEL: fmul_h_immtwo_zero:
765 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
766 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, #2.0
768 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
769 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
770 <vscale x 8 x half> %a_z,
771 <vscale x 8 x half> splat(half 2.000000e+00))
772 ret <vscale x 8 x half> %out
775 define <vscale x 4 x float> @fmul_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
776 ; CHECK-LABEL: fmul_s_immhalf:
778 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, #0.5
780 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
781 <vscale x 4 x float> %a,
782 <vscale x 4 x float> splat(float 0.500000e+00))
783 ret <vscale x 4 x float> %out
786 define <vscale x 4 x float> @fmul_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
787 ; CHECK-LABEL: fmul_s_immhalf_zero:
789 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
790 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, #0.5
792 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
793 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
794 <vscale x 4 x float> %a_z,
795 <vscale x 4 x float> splat(float 0.500000e+00))
796 ret <vscale x 4 x float> %out
799 define <vscale x 4 x float> @fmul_s_immtwo(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
800 ; CHECK-LABEL: fmul_s_immtwo:
802 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, #2.0
804 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
805 <vscale x 4 x float> %a,
806 <vscale x 4 x float> splat(float 2.000000e+00))
807 ret <vscale x 4 x float> %out
810 define <vscale x 4 x float> @fmul_s_immtwo_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
811 ; CHECK-LABEL: fmul_s_immtwo_zero:
813 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
814 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, #2.0
816 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
817 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
818 <vscale x 4 x float> %a_z,
819 <vscale x 4 x float> splat(float 2.000000e+00))
820 ret <vscale x 4 x float> %out
; Predicated FMUL with splat 0.5 (nxv2f64) should select the
; fmul-by-immediate #0.5 form.
823 define <vscale x 2 x double> @fmul_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
824 ; CHECK-LABEL: fmul_d_immhalf:
826 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, #0.5
828 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
829 <vscale x 2 x double> %a,
830 <vscale x 2 x double> splat(double 0.500000e+00))
831 ret <vscale x 2 x double> %out
; Zeroing variant of fmul_d_immhalf (attrs #1): expects movprfx z0.d, p0/z
; followed by fmul-by-immediate #0.5.
834 define <vscale x 2 x double> @fmul_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
835 ; CHECK-LABEL: fmul_d_immhalf_zero:
837 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
838 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, #0.5
840 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
841 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
842 <vscale x 2 x double> %a_z,
843 <vscale x 2 x double> splat(double 0.500000e+00))
844 ret <vscale x 2 x double> %out
; Predicated FMUL with splat 2.0 (nxv2f64) should select the
; fmul-by-immediate #2.0 form.
847 define <vscale x 2 x double> @fmul_d_immtwo(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
848 ; CHECK-LABEL: fmul_d_immtwo:
850 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, #2.0
852 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
853 <vscale x 2 x double> %a,
854 <vscale x 2 x double> splat(double 2.000000e+00))
855 ret <vscale x 2 x double> %out
; Zeroing variant of fmul_d_immtwo (attrs #1): expects movprfx z0.d, p0/z
; followed by fmul-by-immediate #2.0.
858 define <vscale x 2 x double> @fmul_d_immtwo_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
859 ; CHECK-LABEL: fmul_d_immtwo_zero:
861 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
862 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, #2.0
864 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
865 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
866 <vscale x 2 x double> %a_z,
867 <vscale x 2 x double> splat(double 2.000000e+00))
868 ret <vscale x 2 x double> %out
; Predicated FSUB with splat 0.5 (nxv8f16) should select the
; fsub-by-immediate #0.5 form.
871 define <vscale x 8 x half> @fsub_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
872 ; CHECK-LABEL: fsub_h_immhalf:
874 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, #0.5
876 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
877 <vscale x 8 x half> %a,
878 <vscale x 8 x half> splat(half 0.500000e+00))
879 ret <vscale x 8 x half> %out
; Zeroing variant of fsub_h_immhalf (attrs #1,
; +use-experimental-zeroing-pseudos): expects movprfx p0/z then fsub #0.5.
882 define <vscale x 8 x half> @fsub_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
883 ; CHECK-LABEL: fsub_h_immhalf_zero:
885 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
886 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, #0.5
888 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
889 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
890 <vscale x 8 x half> %a_z,
891 <vscale x 8 x half> splat(half 0.500000e+00))
892 ret <vscale x 8 x half> %out
; Predicated FSUB with splat 1.0 (nxv8f16) should select the
; fsub-by-immediate #1.0 form.
895 define <vscale x 8 x half> @fsub_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
896 ; CHECK-LABEL: fsub_h_immone:
898 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, #1.0
900 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
901 <vscale x 8 x half> %a,
902 <vscale x 8 x half> splat(half 1.000000e+00))
903 ret <vscale x 8 x half> %out
; Zeroing variant of fsub_h_immone (attrs #1): expects movprfx p0/z then
; fsub-by-immediate #1.0.
906 define <vscale x 8 x half> @fsub_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
907 ; CHECK-LABEL: fsub_h_immone_zero:
909 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
910 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, #1.0
912 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
913 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
914 <vscale x 8 x half> %a_z,
915 <vscale x 8 x half> splat(half 1.000000e+00))
916 ret <vscale x 8 x half> %out
; Predicated FSUB with splat 0.5 (nxv4f32) should select the
; fsub-by-immediate #0.5 form.
919 define <vscale x 4 x float> @fsub_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
920 ; CHECK-LABEL: fsub_s_immhalf:
922 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, #0.5
924 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
925 <vscale x 4 x float> %a,
926 <vscale x 4 x float> splat(float 0.500000e+00))
927 ret <vscale x 4 x float> %out
; Zeroing variant of fsub_s_immhalf (attrs #1): expects movprfx p0/z then
; fsub-by-immediate #0.5.
930 define <vscale x 4 x float> @fsub_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
931 ; CHECK-LABEL: fsub_s_immhalf_zero:
933 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
934 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, #0.5
936 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
937 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
938 <vscale x 4 x float> %a_z,
939 <vscale x 4 x float> splat(float 0.500000e+00))
940 ret <vscale x 4 x float> %out
; Predicated FSUB with splat 1.0 (nxv4f32) should select the
; fsub-by-immediate #1.0 form.
943 define <vscale x 4 x float> @fsub_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
944 ; CHECK-LABEL: fsub_s_immone:
946 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, #1.0
948 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
949 <vscale x 4 x float> %a,
950 <vscale x 4 x float> splat(float 1.000000e+00))
951 ret <vscale x 4 x float> %out
; Zeroing variant of fsub_s_immone (attrs #1): expects movprfx p0/z then
; fsub-by-immediate #1.0.
954 define <vscale x 4 x float> @fsub_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
955 ; CHECK-LABEL: fsub_s_immone_zero:
957 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
958 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, #1.0
960 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
961 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
962 <vscale x 4 x float> %a_z,
963 <vscale x 4 x float> splat(float 1.000000e+00))
964 ret <vscale x 4 x float> %out
; Predicated FSUB with splat 0.5 (nxv2f64) should select the
; fsub-by-immediate #0.5 form.
967 define <vscale x 2 x double> @fsub_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
968 ; CHECK-LABEL: fsub_d_immhalf:
970 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, #0.5
972 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
973 <vscale x 2 x double> %a,
974 <vscale x 2 x double> splat(double 0.500000e+00))
975 ret <vscale x 2 x double> %out
; Zeroing variant of fsub_d_immhalf (attrs #1): expects movprfx p0/z then
; fsub-by-immediate #0.5.
978 define <vscale x 2 x double> @fsub_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
979 ; CHECK-LABEL: fsub_d_immhalf_zero:
981 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
982 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, #0.5
984 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
985 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
986 <vscale x 2 x double> %a_z,
987 <vscale x 2 x double> splat(double 0.500000e+00))
988 ret <vscale x 2 x double> %out
; Predicated FSUB with splat 1.0 (nxv2f64) should select the
; fsub-by-immediate #1.0 form.
991 define <vscale x 2 x double> @fsub_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
992 ; CHECK-LABEL: fsub_d_immone:
994 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, #1.0
996 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
997 <vscale x 2 x double> %a,
998 <vscale x 2 x double> splat(double 1.000000e+00))
999 ret <vscale x 2 x double> %out
; Zeroing variant of fsub_d_immone (attrs #1): expects movprfx p0/z then
; fsub-by-immediate #1.0.
1002 define <vscale x 2 x double> @fsub_d_immone_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
1003 ; CHECK-LABEL: fsub_d_immone_zero:
1005 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
1006 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, #1.0
1008 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
1009 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
1010 <vscale x 2 x double> %a_z,
1011 <vscale x 2 x double> splat(double 1.000000e+00))
1012 ret <vscale x 2 x double> %out
; Reversed subtract (0.5 - a_z) by immediate, nxv8f16. All fsubr tests use
; attrs #1 and the zero-merged input, expecting movprfx p0/z + fsubr #0.5.
1015 define <vscale x 8 x half> @fsubr_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
1016 ; CHECK-LABEL: fsubr_h_immhalf:
1018 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
1019 ; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, #0.5
1021 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
1022 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
1023 <vscale x 8 x half> %a_z,
1024 <vscale x 8 x half> splat(half 0.500000e+00))
1025 ret <vscale x 8 x half> %out
; Reversed subtract by immediate 1.0 (nxv8f16), zeroing form (attrs #1):
; expects movprfx p0/z + fsubr #1.0.
1028 define <vscale x 8 x half> @fsubr_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #1 {
1029 ; CHECK-LABEL: fsubr_h_immone:
1031 ; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
1032 ; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, #1.0
1034 %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
1035 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
1036 <vscale x 8 x half> %a_z,
1037 <vscale x 8 x half> splat(half 1.000000e+00))
1038 ret <vscale x 8 x half> %out
; Reversed subtract by immediate 0.5 (nxv4f32), zeroing form (attrs #1):
; expects movprfx p0/z + fsubr #0.5.
1041 define <vscale x 4 x float> @fsubr_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
1042 ; CHECK-LABEL: fsubr_s_immhalf:
1044 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
1045 ; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, #0.5
1047 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
1048 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
1049 <vscale x 4 x float> %a_z,
1050 <vscale x 4 x float> splat(float 0.500000e+00))
1051 ret <vscale x 4 x float> %out
; Reversed subtract by immediate 1.0 (nxv4f32), zeroing form (attrs #1):
; expects movprfx p0/z + fsubr #1.0.
1054 define <vscale x 4 x float> @fsubr_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #1 {
1055 ; CHECK-LABEL: fsubr_s_immone:
1057 ; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
1058 ; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, #1.0
1060 %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
1061 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
1062 <vscale x 4 x float> %a_z,
1063 <vscale x 4 x float> splat(float 1.000000e+00))
1064 ret <vscale x 4 x float> %out
; Reversed subtract by immediate 0.5 (nxv2f64), zeroing form (attrs #1):
; expects movprfx p0/z + fsubr #0.5.
1067 define <vscale x 2 x double> @fsubr_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
1068 ; CHECK-LABEL: fsubr_d_immhalf:
1070 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
1071 ; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, #0.5
1073 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
1074 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
1075 <vscale x 2 x double> %a_z,
1076 <vscale x 2 x double> splat(double 0.500000e+00))
1077 ret <vscale x 2 x double> %out
; Reversed subtract by immediate 1.0 (nxv2f64), zeroing form (attrs #1):
; expects movprfx p0/z + fsubr #1.0.
1080 define <vscale x 2 x double> @fsubr_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #1 {
1081 ; CHECK-LABEL: fsubr_d_immone:
1083 ; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
1084 ; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, #1.0
1086 %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
1087 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
1088 <vscale x 2 x double> %a_z,
1089 <vscale x 2 x double> splat(double 1.000000e+00))
1090 ret <vscale x 2 x double> %out
; Declarations of the predicated SVE FP intrinsics exercised by the tests
; above: one per element type (f16/f32/f64) for each of fadd, fmax, fmaxnm,
; fmin, fminnm, fmul, fsub and fsubr.
1093 declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1094 declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1095 declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1097 declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1098 declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1099 declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1101 declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1102 declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1103 declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1105 declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1106 declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1107 declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1109 declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1110 declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1111 declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1113 declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1114 declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1115 declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1117 declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1118 declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1119 declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1121 declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1122 declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1123 declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
; #0: baseline SVE. #1: additionally enables the experimental zeroing
; movprfx pseudo expansion, used by the *_zero and fsubr tests above.
1125 attributes #0 = { "target-features"="+sve" }
1126 attributes #1 = { "target-features"="+sve,+use-experimental-zeroing-pseudos" }