1 ; RUN: llc -mtriple=arm -mattr=+neon %s -o - | FileCheck %s
3 ; Check codegen for 64-bit icmp operations, which don't directly map to any
6 define <2 x i64> @vne(<2 x i64>* %A, <2 x i64>* %B) nounwind {
14 ;CHECK-NEXT: mov pc, lr
15 %tmp1 = load <2 x i64>, <2 x i64>* %A
16 %tmp2 = load <2 x i64>, <2 x i64>* %B
17 %tmp3 = icmp ne <2 x i64> %tmp1, %tmp2
18 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
22 define <2 x i64> @veq(<2 x i64>* %A, <2 x i64>* %B) nounwind {
25 ;CHECK-NEXT: vrev64.32
29 ;CHECK-NEXT: mov pc, lr
30 %tmp1 = load <2 x i64>, <2 x i64>* %A
31 %tmp2 = load <2 x i64>, <2 x i64>* %B
32 %tmp3 = icmp eq <2 x i64> %tmp1, %tmp2
33 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
37 ; FIXME: We currently generate terrible code for this.
38 ; (ATop < BTop) | ((ATop == BTop) & (ABottom < BBottom))
39 ; would come out to roughly 6 instructions, but we currently
41 define <2 x i64> @vult(<2 x i64>* %A, <2 x i64>* %B) nounwind {
47 %tmp1 = load <2 x i64>, <2 x i64>* %A
48 %tmp2 = load <2 x i64>, <2 x i64>* %B
49 %tmp3 = icmp ult <2 x i64> %tmp1, %tmp2
50 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>