; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vmovl\\.s8} %t | count 1
; RUN: grep {vmovl\\.s16} %t | count 1
; RUN: grep {vmovl\\.s32} %t | count 1
; RUN: grep {vmovl\\.u8} %t | count 1
; RUN: grep {vmovl\\.u16} %t | count 1
; RUN: grep {vmovl\\.u32} %t | count 1
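
; Sign-extend <8 x i8> to <8 x i16>; expects one vmovl.s8.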
define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = call <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8> %tmp1)
  ret <8 x i16> %tmp2
}
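
; Sign-extend <4 x i16> to <4 x i32>; expects one vmovl.s16.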
define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %tmp1)
  ret <4 x i32> %tmp2
}
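
; Sign-extend <2 x i32> to <2 x i64>; expects one vmovl.s32.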
define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = call <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32> %tmp1)
  ret <2 x i64> %tmp2
}
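
; Zero-extend <8 x i8> to <8 x i16>; expects one vmovl.u8.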
define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = call <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8> %tmp1)
  ret <8 x i16> %tmp2
}
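
; Zero-extend <4 x i16> to <4 x i32>; expects one vmovl.u16.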
define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16> %tmp1)
  ret <4 x i32> %tmp2
}
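
; Zero-extend <2 x i32> to <2 x i64>; expects one vmovl.u32.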
define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = call <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32> %tmp1)
  ret <2 x i64> %tmp2
}
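
; Declarations of the signed and unsigned widening-move intrinsics.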
declare <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32>) nounwind readnone