1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=arm-eabi -mattr=+neon | FileCheck %s
; Compare-equal of two <8 x i8> vectors; selects NEON vceq.i8 (d-register form).
define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vceqi8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vceq.i8 d16, d17, d16
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = icmp eq <8 x i8> %tmp1, %tmp2
	%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
	ret <8 x i8> %tmp4
}
; Compare-equal of two <4 x i16> vectors; selects NEON vceq.i16 (d-register form).
define <4 x i16> @vceqi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vceqi16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vceq.i16 d16, d17, d16
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = icmp eq <4 x i16> %tmp1, %tmp2
	%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
	ret <4 x i16> %tmp4
}
; Compare-equal of two <2 x i32> vectors; selects NEON vceq.i32 (d-register form).
define <2 x i32> @vceqi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: vceqi32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vceq.i32 d16, d17, d16
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = icmp eq <2 x i32> %tmp1, %tmp2
	%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
	ret <2 x i32> %tmp4
}
; Ordered float compare-equal of two <2 x float> vectors; selects NEON vceq.f32
; (d-register form), producing an all-ones/all-zeros <2 x i32> mask.
define <2 x i32> @vceqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-LABEL: vceqf32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vceq.f32 d16, d17, d16
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x float>, <2 x float>* %A
	%tmp2 = load <2 x float>, <2 x float>* %B
	%tmp3 = fcmp oeq <2 x float> %tmp1, %tmp2
	%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
	ret <2 x i32> %tmp4
}
; Compare-equal of two <16 x i8> vectors; selects NEON vceq.i8 (q-register form).
define <16 x i8> @vceqQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vceqQi8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vceq.i8 q8, q9, q8
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = load <16 x i8>, <16 x i8>* %B
	%tmp3 = icmp eq <16 x i8> %tmp1, %tmp2
	%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
	ret <16 x i8> %tmp4
}
; Compare-equal of two <8 x i16> vectors; selects NEON vceq.i16 (q-register form).
define <8 x i16> @vceqQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vceqQi16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vceq.i16 q8, q9, q8
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = load <8 x i16>, <8 x i16>* %B
	%tmp3 = icmp eq <8 x i16> %tmp1, %tmp2
	%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
	ret <8 x i16> %tmp4
}
; Compare-equal of two <4 x i32> vectors; selects NEON vceq.i32 (q-register form).
define <4 x i32> @vceqQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vceqQi32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vceq.i32 q8, q9, q8
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = load <4 x i32>, <4 x i32>* %B
	%tmp3 = icmp eq <4 x i32> %tmp1, %tmp2
	%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
	ret <4 x i32> %tmp4
}
; Ordered float compare-equal of two <4 x float> vectors; selects NEON vceq.f32
; (q-register form), producing an all-ones/all-zeros <4 x i32> mask.
define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vceqQf32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vceq.f32 q8, q9, q8
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x float>, <4 x float>* %A
	%tmp2 = load <4 x float>, <4 x float>* %B
	%tmp3 = fcmp oeq <4 x float> %tmp1, %tmp2
	%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
	ret <4 x i32> %tmp4
}
128 define <8 x i8> @vceqi8Z(<8 x i8>* %A) nounwind {
129 ; CHECK-LABEL: vceqi8Z:
131 ; CHECK-NEXT: vldr d16, [r0]
132 ; CHECK-NEXT: vceq.i8 d16, d16, #0
133 ; CHECK-NEXT: vmov r0, r1, d16
134 ; CHECK-NEXT: mov pc, lr
135 %tmp1 = load <8 x i8>, <8 x i8>* %A
136 %tmp3 = icmp eq <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
137 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>