; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
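
; Check that vector negation (integer sub from zeroinitializer, fsub from -0.0,
; and the llvm.arm.neon.vqneg intrinsics) selects the NEON VNEG/VQNEG instructions.
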
define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
;CHECK: vneg.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = sub <8 x i8> zeroinitializer, %tmp1
	ret <8 x i8> %tmp2
}

define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
;CHECK: vneg.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = sub <4 x i16> zeroinitializer, %tmp1
	ret <4 x i16> %tmp2
}

define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
;CHECK: vneg.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = sub <2 x i32> zeroinitializer, %tmp1
	ret <2 x i32> %tmp2
}

define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
;CHECK: vneg.f32
	%tmp1 = load <2 x float>* %A
	%tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
	ret <2 x float> %tmp2
}

define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
;CHECK: vneg.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = sub <16 x i8> zeroinitializer, %tmp1
	ret <16 x i8> %tmp2
}

define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
;CHECK: vneg.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = sub <8 x i16> zeroinitializer, %tmp1
	ret <8 x i16> %tmp2
}

define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
;CHECK: vneg.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = sub <4 x i32> zeroinitializer, %tmp1
	ret <4 x i32> %tmp2
}

define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
;CHECK: vneg.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
	ret <4 x float> %tmp2
}

define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
;CHECK: vqneg.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
	ret <8 x i8> %tmp2
}

define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
;CHECK: vqneg.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
;CHECK: vqneg.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
	ret <2 x i32> %tmp2
}

define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
;CHECK: vqneg.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
	ret <16 x i8> %tmp2
}

define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
;CHECK: vqneg.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
;CHECK: vqneg.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
	ret <4 x i32> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) nounwind readnone