; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-enable-simd-scalar| FileCheck %s

define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK-LABEL: add8xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  %tmp3 = add <8 x i8> %A, %B;
  ret <8 x i8> %tmp3
}

define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
; CHECK-LABEL: add16xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %tmp3 = add <16 x i8> %A, %B;
  ret <16 x i8> %tmp3
}

define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
; CHECK-LABEL: add4xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %tmp3 = add <4 x i16> %A, %B;
  ret <4 x i16> %tmp3
}

define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
; CHECK-LABEL: add8xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %tmp3 = add <8 x i16> %A, %B;
  ret <8 x i16> %tmp3
}

define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: add2xi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = add <2 x i32> %A, %B;
  ret <2 x i32> %tmp3
}

define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: add4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = add <4 x i32> %A, %B;
  ret <4 x i32> %tmp3
}

define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: add2xi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = add <2 x i64> %A, %B;
  ret <2 x i64> %tmp3
}

define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-LABEL: add2xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = fadd <2 x float> %A, %B;
  ret <2 x float> %tmp3
}

define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: add4xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = fadd <4 x float> %A, %B;
  ret <4 x float> %tmp3
}

define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-LABEL: add2xdouble:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = fadd <2 x double> %A, %B;
  ret <2 x double> %tmp3
}

define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK-LABEL: sub8xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  %tmp3 = sub <8 x i8> %A, %B;
  ret <8 x i8> %tmp3
}

define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
; CHECK-LABEL: sub16xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %tmp3 = sub <16 x i8> %A, %B;
  ret <16 x i8> %tmp3
}

define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
; CHECK-LABEL: sub4xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %tmp3 = sub <4 x i16> %A, %B;
  ret <4 x i16> %tmp3
}

define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
; CHECK-LABEL: sub8xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %tmp3 = sub <8 x i16> %A, %B;
  ret <8 x i16> %tmp3
}

define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: sub2xi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = sub <2 x i32> %A, %B;
  ret <2 x i32> %tmp3
}

define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: sub4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = sub <4 x i32> %A, %B;
  ret <4 x i32> %tmp3
}

define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: sub2xi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = sub <2 x i64> %A, %B;
  ret <2 x i64> %tmp3
}

define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-LABEL: sub2xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = fsub <2 x float> %A, %B;
  ret <2 x float> %tmp3
}

define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: sub4xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = fsub <4 x float> %A, %B;
  ret <4 x float> %tmp3
}

define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-LABEL: sub2xdouble:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = fsub <2 x double> %A, %B;
  ret <2 x double> %tmp3
}

define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vadd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fadd <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmul_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vdiv_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fdiv <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmla_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d1, d1, d2
; CHECK-NEXT:    fadd d0, d1, d0
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %b, %c
  %2 = fadd <1 x double> %1, %a
  ret <1 x double> %2
}

define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmls_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d1, d1, d2
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %b, %c
  %2 = fsub <1 x double> %a, %1
  ret <1 x double> %2
}

define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfms_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsub d0, d1, d2, d0
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> <double -0.000000e+00>, %b
  %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %2
}

define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfma_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmadd d0, d1, d2, d0
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %1
}

define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vsub_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vabd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmax_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmin_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmaxnm_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vminnm_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vabs_f64(<1 x double> %a) {
; CHECK-LABEL: test_vabs_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs d0, d0
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
  ret <1 x double> %1
}

define <1 x double> @test_vneg_f64(<1 x double> %a) {
; CHECK-LABEL: test_vneg_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fneg d0, d0
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> <double -0.000000e+00>, %a
  ret <1 x double> %1
}

declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
declare <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)