; test/CodeGen/ARM/fp16-intrinsic-vector-1op.ll
; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+v8.2a,+fullfp16,+neon -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-HARD
; RUN: llc < %s -mtriple=armeb-none-eabi -mattr=+v8.2a,+fullfp16,+neon -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-HARD-BE
; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+v8.2a,+fullfp16,+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP
; RUN: llc < %s -mtriple=armeb-none-eabi -mattr=+v8.2a,+fullfp16,+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-BE
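
; The four RUN lines cover little-endian (arm) and big-endian (armeb) targets,
; each with the hard-float and soft-float ABIs; the CHECK-HARD, CHECK-HARD-BE,
; CHECK-SOFTFP and CHECK-SOFTFP-BE prefixes correspond to those configurations.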

declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
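
; With +fullfp16 and +neon, fabs on <8 x half> is expected to lower to a single
; vabs.f16 on a q register rather than being scalarized or promoted to f32.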

define dso_local <8 x half> @t_vabsq_f16(<8 x half> %a) {
; CHECK-LABEL:      t_vabsq_f16:

; CHECK-HARD:         vabs.f16  q0, q0
; CHECK-HARD-NEXT:    bx  lr
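
; Big-endian lowering brackets the operation with vrev64.16 to convert between
; the argument-passing element order and the lane order used for the vabs.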

; CHECK-HARD-BE:      vrev64.16 [[Q8:q[0-9]+]], q0
; CHECK-HARD-BE-NEXT: vabs.f16  [[Q8]], [[Q8]]
; CHECK-HARD-BE-NEXT: vrev64.16 q0, [[Q8]]
; CHECK-HARD-BE-NEXT: bx  lr
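
; With the soft-float ABI the <8 x half> argument and result are passed in
; r0-r3, so the vector is moved into and out of d registers with vmov around
; the vabs.f16.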

; CHECK-SOFTFP:       vmov  d{{.*}}, r2, r3
; CHECK-SOFTFP:       vmov  d{{.*}}, r0, r1
; CHECK-SOFTFP:       vabs.f16  q{{.*}}, q{{.*}}
; CHECK-SOFTFP:       vmov  r0, r1, d{{.*}}
; CHECK-SOFTFP:       vmov  r2, r3, d{{.*}}
; CHECK-SOFTFP:       bx  lr
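
; Big-endian soft-float swaps each GPR pair in the vmovs (r3,r2 and r1,r0) and,
; as in the hard-float big-endian case, brackets the vabs.f16 with vrev64.16.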

; CHECK-SOFTFP-BE:    vmov  [[D17:d[0-9]+]], r3, r2
; CHECK-SOFTFP-BE:    vmov  [[D16:d[0-9]+]], r1, r0
; CHECK-SOFTFP-BE:    vrev64.16 [[Q8:q[0-9]+]], [[Q8]]
; CHECK-SOFTFP-BE:    vabs.f16  [[Q8]], [[Q8]]
; CHECK-SOFTFP-BE:    vrev64.16 [[Q8]], [[Q8]]
; CHECK-SOFTFP-BE:    vmov  r1, r0, [[D16]]
; CHECK-SOFTFP-BE:    vmov  r3, r2, [[D17]]
; CHECK-SOFTFP-BE:    bx  lr

entry:
  %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a) #3
  ret <8 x half> %vabs1.i
}