; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
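
; Tests lowering of NEON bit-reverse (rbit) intrinsics, vector widening via
; sext/zext (sxtl/uxtl), and a vsli-related combine, under Apple asm syntax.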

define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: rbit_8b:
;CHECK: rbit.8b
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp3 = call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %tmp1)
  ret <8 x i8> %tmp3
}

define <16 x i8> @rbit_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: rbit_16b:
;CHECK: rbit.16b
  %tmp1 = load <16 x i8>, <16 x i8>* %A
  %tmp3 = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %tmp1)
  ret <16 x i8> %tmp3
}

declare <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8>) nounwind readnone
declare <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8>) nounwind readnone
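
; The sxtl/uxtl tests below check that vector sext/zext are selected to the
; NEON widening shifts (sshll/ushll with a #0 shift, i.e. the sxtl/uxtl
; aliases) for each element size.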

define <8 x i16> @sxtl8h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: sxtl8h:
;CHECK: sshll.8h
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
  ret <8 x i16> %tmp2
}

define <8 x i16> @uxtl8h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: uxtl8h:
;CHECK: ushll.8h
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
  ret <8 x i16> %tmp2
}

define <4 x i32> @sxtl4s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: sxtl4s:
;CHECK: sshll.4s
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
  ret <4 x i32> %tmp2
}

define <4 x i32> @uxtl4s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: uxtl4s:
;CHECK: ushll.4s
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
  ret <4 x i32> %tmp2
}

define <2 x i64> @sxtl2d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: sxtl2d:
;CHECK: sshll.2d
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
  ret <2 x i64> %tmp2
}

define <2 x i64> @uxtl2d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: uxtl2d:
;CHECK: ushll.2d
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
  ret <2 x i64> %tmp2
}

; Check for incorrect use of vector bic.
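; The byte-wise AND below should be materialized as a movi constant plus a
; plain and.16b; it must not be mis-combined into a vector bic, hence the
; CHECK-NOT on bic.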
define void @test_vsliq(i8* nocapture %src, i8* nocapture %dest) nounwind noinline ssp {
entry:
; CHECK-LABEL: test_vsliq:
; CHECK-NOT: bic
; CHECK: movi.2d [[REG1:v[0-9]+]], #0x0000ff000000ff
; CHECK: and.16b v{{[0-9]+}}, v{{[0-9]+}}, [[REG1]]
  %0 = bitcast i8* %src to <16 x i8>*
  %1 = load <16 x i8>, <16 x i8>* %0, align 16
  %and.i = and <16 x i8> %1, <i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0>
  %2 = bitcast <16 x i8> %and.i to <8 x i16>
  %vshl_n = shl <8 x i16> %2, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %3 = or <8 x i16> %2, %vshl_n
  %4 = bitcast <8 x i16> %3 to <4 x i32>
  %vshl_n8 = shl <4 x i32> %4, <i32 16, i32 16, i32 16, i32 16>
  %5 = or <4 x i32> %4, %vshl_n8
  %6 = bitcast <4 x i32> %5 to <16 x i8>
  %7 = bitcast i8* %dest to <16 x i8>*
  store <16 x i8> %6, <16 x i8>* %7, align 16
  ret void
}