1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=thumbv7k-linux-gnu | FileCheck %s
4 declare {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
5 declare {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64>, <2 x i64>)
6 declare {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
7 declare {<2 x i64>, <2 x i1>} @llvm.ssub.with.overflow.v2i64(<2 x i64>, <2 x i64>)
; uaddo: lane-wise unsigned add-with-overflow on <2 x i64>. The wrapped sum is
; stored back through %ptr and the per-lane overflow bits form the <2 x i1>
; result. The expected codegen computes each lane's carry-out with a 64-bit
; subs/sbcs compare of the wrapped sum against an addend (sum < addend => carry).
; NOTE(review): the trailing `ret <2 x i1> %o` / `}` lines (original lines
; 49-51) and the CHECK-LABEL line are not visible in this chunk — confirm
; against the full file before editing; CHECK lines are autogenerated, do not
; hand-edit them.
9 define <2 x i1> @uaddo(<2 x i64> *%ptr, <2 x i64> *%ptr2) {
12 ; CHECK-NEXT: push {r4, r5, r6, r7, lr}
13 ; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
14 ; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
15 ; CHECK-NEXT: movs r1, #0
16 ; CHECK-NEXT: vadd.i64 q8, q9, q8
17 ; CHECK-NEXT: vmov.32 r3, d18[0]
18 ; CHECK-NEXT: vmov.32 r2, d18[1]
19 ; CHECK-NEXT: vmov.32 r12, d16[0]
20 ; CHECK-NEXT: vmov.32 lr, d16[1]
21 ; CHECK-NEXT: vmov.32 r4, d17[0]
22 ; CHECK-NEXT: vmov.32 r5, d19[0]
23 ; CHECK-NEXT: vmov.32 r6, d17[1]
24 ; CHECK-NEXT: vmov.32 r7, d19[1]
25 ; CHECK-NEXT: subs.w r3, r12, r3
26 ; CHECK-NEXT: sbcs.w r2, lr, r2
27 ; CHECK-NEXT: mov.w r2, #0
29 ; CHECK-NEXT: movlo r2, #1
30 ; CHECK-NEXT: cmp r2, #0
32 ; CHECK-NEXT: movne.w r2, #-1
33 ; CHECK-NEXT: subs r3, r4, r5
34 ; CHECK-NEXT: sbcs.w r3, r6, r7
36 ; CHECK-NEXT: movlo r1, #1
37 ; CHECK-NEXT: cmp r1, #0
39 ; CHECK-NEXT: movne.w r1, #-1
40 ; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
41 ; CHECK-NEXT: mov r0, r2
42 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
43 %x = load <2 x i64>, <2 x i64>* %ptr, align 8 ; first operand (also the store destination)
44 %y = load <2 x i64>, <2 x i64>* %ptr2, align 8 ; second operand
45 %s = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %x, <2 x i64> %y)
46 %m = extractvalue {<2 x i64>, <2 x i1>} %s, 0 ; wrapped sum
47 %o = extractvalue {<2 x i64>, <2 x i1>} %s, 1 ; per-lane carry-out mask
48 store <2 x i64> %m, <2 x i64>* %ptr
; usubo: lane-wise unsigned subtract-with-overflow (borrow) on <2 x i64>. The
; wrapped difference is stored back through %ptr and the per-lane borrow bits
; form the <2 x i1> result. Codegen mirrors uaddo: vsub.i64 for the value,
; scalar subs/sbcs compares for the borrow flags.
; NOTE(review): the trailing `ret <2 x i1> %o` / `}` lines (original lines
; 92-94) and the CHECK-LABEL line are not visible in this chunk — confirm
; against the full file before editing; CHECK lines are autogenerated, do not
; hand-edit them.
52 define <2 x i1> @usubo(<2 x i64> *%ptr, <2 x i64> *%ptr2) {
55 ; CHECK-NEXT: push {r4, r5, r6, r7, lr}
56 ; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
57 ; CHECK-NEXT: movs r1, #0
58 ; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
59 ; CHECK-NEXT: vsub.i64 q8, q9, q8
60 ; CHECK-NEXT: vmov.32 r12, d18[0]
61 ; CHECK-NEXT: vmov.32 lr, d18[1]
62 ; CHECK-NEXT: vmov.32 r3, d16[0]
63 ; CHECK-NEXT: vmov.32 r2, d16[1]
64 ; CHECK-NEXT: vmov.32 r4, d19[0]
65 ; CHECK-NEXT: vmov.32 r5, d17[0]
66 ; CHECK-NEXT: vmov.32 r6, d19[1]
67 ; CHECK-NEXT: vmov.32 r7, d17[1]
68 ; CHECK-NEXT: subs.w r3, r12, r3
69 ; CHECK-NEXT: sbcs.w r2, lr, r2
70 ; CHECK-NEXT: mov.w r2, #0
72 ; CHECK-NEXT: movlo r2, #1
73 ; CHECK-NEXT: cmp r2, #0
75 ; CHECK-NEXT: movne.w r2, #-1
76 ; CHECK-NEXT: subs r3, r4, r5
77 ; CHECK-NEXT: sbcs.w r3, r6, r7
79 ; CHECK-NEXT: movlo r1, #1
80 ; CHECK-NEXT: cmp r1, #0
82 ; CHECK-NEXT: movne.w r1, #-1
83 ; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
84 ; CHECK-NEXT: mov r0, r2
85 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
86 %x = load <2 x i64>, <2 x i64>* %ptr, align 8 ; minuend (also the store destination)
87 %y = load <2 x i64>, <2 x i64>* %ptr2, align 8 ; subtrahend
88 %s = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %x, <2 x i64> %y)
89 %m = extractvalue {<2 x i64>, <2 x i1>} %s, 0 ; wrapped difference
90 %o = extractvalue {<2 x i64>, <2 x i1>} %s, 1 ; per-lane borrow mask
91 store <2 x i64> %m, <2 x i64>* %ptr
; saddo: lane-wise signed add-with-overflow on <2 x i64>. The wrapped sum is
; stored back through %ptr and the per-lane signed-overflow bits form the
; <2 x i1> result. The expected codegen builds the overflow mask from signed
; compares (movlt) and the addend sign bits (asrs #31), combines them with
; vdup/veor, then narrows with vmovn.i64.
; NOTE(review): the trailing `ret <2 x i1> %o` / `}` lines (original lines
; 146-148), the CHECK-LABEL line, and the conditional `it` lines (numbering
; gaps at 116, 120, 123, 128) are not visible in this chunk — confirm against
; the full file before editing; CHECK lines are autogenerated, do not
; hand-edit them.
98 define <2 x i1> @saddo(<2 x i64> *%ptr, <2 x i64> *%ptr2) {
99 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
100 ; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
101 ; CHECK-NEXT: movs r3, #0
102 ; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
103 ; CHECK-NEXT: vadd.i64 q8, q10, q9
104 ; CHECK-NEXT: vmov.32 r2, d20[0]
105 ; CHECK-NEXT: vmov.32 r1, d20[1]
106 ; CHECK-NEXT: vmov.32 r12, d16[0]
107 ; CHECK-NEXT: vmov.32 r8, d16[1]
108 ; CHECK-NEXT: vmov.32 lr, d17[0]
109 ; CHECK-NEXT: vmov.32 r4, d21[0]
110 ; CHECK-NEXT: vmov.32 r5, d17[1]
111 ; CHECK-NEXT: vmov.32 r6, d18[1]
112 ; CHECK-NEXT: vmov.32 r7, d21[1]
113 ; CHECK-NEXT: subs.w r2, r12, r2
114 ; CHECK-NEXT: vmov.32 r2, d19[1]
115 ; CHECK-NEXT: sbcs.w r1, r8, r1
116 ; CHECK-NEXT: mov.w r1, #0
118 ; CHECK-NEXT: movlt r1, #1
119 ; CHECK-NEXT: subs.w r4, lr, r4
120 ; CHECK-NEXT: sbcs.w r7, r5, r7
122 ; CHECK-NEXT: movlt r3, #1
123 ; CHECK-NEXT: cmp r3, #0
125 ; CHECK-NEXT: movne.w r3, #-1
126 ; CHECK-NEXT: asrs r7, r6, #31
127 ; CHECK-NEXT: vdup.32 d21, r3
128 ; CHECK-NEXT: cmp r1, #0
130 ; CHECK-NEXT: movne.w r1, #-1
131 ; CHECK-NEXT: vdup.32 d20, r1
132 ; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
133 ; CHECK-NEXT: asrs r2, r2, #31
134 ; CHECK-NEXT: vdup.32 d19, r2
135 ; CHECK-NEXT: vdup.32 d18, r7
136 ; CHECK-NEXT: veor q9, q9, q10
137 ; CHECK-NEXT: vmovn.i64 d18, q9
138 ; CHECK-NEXT: vmov r2, r1, d18
139 ; CHECK-NEXT: mov r0, r2
140 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
141 %x = load <2 x i64>, <2 x i64>* %ptr, align 8 ; first operand (also the store destination)
142 %y = load <2 x i64>, <2 x i64>* %ptr2, align 8 ; second operand
143 %s = call {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64> %x, <2 x i64> %y)
144 %m = extractvalue {<2 x i64>, <2 x i1>} %s, 0 ; wrapped sum
145 %o = extractvalue {<2 x i64>, <2 x i1>} %s, 1 ; per-lane signed-overflow mask
146 store <2 x i64> %m, <2 x i64>* %ptr
; ssubo: lane-wise signed subtract-with-overflow on <2 x i64>. The wrapped
; difference is stored back through %ptr and the per-lane signed-overflow bits
; form the <2 x i1> result. The expected codegen compares the wrapped result
; against the minuend (subs/sbcs + movlt) and tests the subtrahend's sign via
; rsbs/sbcs, xors the two conditions with veor, then narrows with vmovn.i64.
; NOTE(review): the trailing `ret <2 x i1> %o` / `}` lines, the `@ %bb.0:`
; line, and the conditional `it` lines (numbering gaps at 171, 177, 180, 186,
; 190, 193, 197, 200) are not visible in this chunk — confirm against the full
; file before editing; CHECK lines are autogenerated, do not hand-edit them.
149 define <2 x i1> @ssubo(<2 x i64> *%ptr, <2 x i64> *%ptr2) {
150 ; CHECK-LABEL: ssubo:
152 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
153 ; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
154 ; CHECK-NEXT: movs r2, #0
155 ; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
156 ; CHECK-NEXT: vsub.i64 q8, q10, q9
157 ; CHECK-NEXT: vmov.32 r1, d20[0]
158 ; CHECK-NEXT: vmov.32 r12, d20[1]
159 ; CHECK-NEXT: vmov.32 r3, d16[0]
160 ; CHECK-NEXT: vmov.32 lr, d16[1]
161 ; CHECK-NEXT: vmov.32 r4, d21[0]
162 ; CHECK-NEXT: vmov.32 r5, d17[0]
163 ; CHECK-NEXT: vmov.32 r6, d21[1]
164 ; CHECK-NEXT: vmov.32 r7, d17[1]
165 ; CHECK-NEXT: vmov.32 r8, d18[1]
166 ; CHECK-NEXT: subs r1, r3, r1
167 ; CHECK-NEXT: vmov.32 r3, d18[0]
168 ; CHECK-NEXT: sbcs.w r1, lr, r12
169 ; CHECK-NEXT: vmov.32 r12, d19[0]
170 ; CHECK-NEXT: mov.w r1, #0
172 ; CHECK-NEXT: movlt r1, #1
173 ; CHECK-NEXT: subs r5, r5, r4
174 ; CHECK-NEXT: vmov.32 r5, d19[1]
175 ; CHECK-NEXT: sbcs r7, r6
176 ; CHECK-NEXT: mov.w r7, #0
178 ; CHECK-NEXT: movlt r7, #1
179 ; CHECK-NEXT: cmp r7, #0
181 ; CHECK-NEXT: movne.w r7, #-1
182 ; CHECK-NEXT: vdup.32 d21, r7
183 ; CHECK-NEXT: rsbs r3, r3, #0
184 ; CHECK-NEXT: sbcs.w r3, r2, r8
185 ; CHECK-NEXT: mov.w r3, #0
187 ; CHECK-NEXT: movlt r3, #1
188 ; CHECK-NEXT: rsbs.w r6, r12, #0
189 ; CHECK-NEXT: sbcs.w r6, r2, r5
191 ; CHECK-NEXT: movlt r2, #1
192 ; CHECK-NEXT: cmp r2, #0
194 ; CHECK-NEXT: movne.w r2, #-1
195 ; CHECK-NEXT: cmp r3, #0
196 ; CHECK-NEXT: vdup.32 d19, r2
198 ; CHECK-NEXT: movne.w r3, #-1
199 ; CHECK-NEXT: cmp r1, #0
201 ; CHECK-NEXT: movne.w r1, #-1
202 ; CHECK-NEXT: vdup.32 d18, r3
203 ; CHECK-NEXT: vdup.32 d20, r1
204 ; CHECK-NEXT: veor q9, q9, q10
205 ; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
206 ; CHECK-NEXT: vmovn.i64 d18, q9
207 ; CHECK-NEXT: vmov r2, r1, d18
208 ; CHECK-NEXT: mov r0, r2
209 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
210 %x = load <2 x i64>, <2 x i64>* %ptr, align 8 ; minuend (also the store destination)
211 %y = load <2 x i64>, <2 x i64>* %ptr2, align 8 ; subtrahend
212 %s = call {<2 x i64>, <2 x i1>} @llvm.ssub.with.overflow.v2i64(<2 x i64> %x, <2 x i64> %y)
213 %m = extractvalue {<2 x i64>, <2 x i1>} %s, 0 ; wrapped difference
214 %o = extractvalue {<2 x i64>, <2 x i1>} %s, 1 ; per-lane signed-overflow mask
215 store <2 x i64> %m, <2 x i64>* %ptr