; RUN: llc -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s
; RUN: llc -global-isel -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s --check-prefix=GISEL

; FIXME: GISel only knows how to handle explicit G_SEXT instructions. So when
; G_SEXT is lowered to anything else, it won't fold in a sxt*.
; FIXME: GISel doesn't currently handle folding the extended-register operand into a cmp.

@var8 = global i8 0
@var16 = global i16 0
@var32 = global i32 0
@var64 = global i64 0

define void @addsub_i8rhs() minsize {
; CHECK-LABEL: addsub_i8rhs:
; GISEL-LABEL: addsub_i8rhs:
  %val8_tmp = load i8, i8* @var8
  %lhs32 = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64

; Need this to prevent extension upon load and give a vanilla i8 operand.
  %val8 = add i8 %val8_tmp, 123

; Zero-extending to 32-bits
  %rhs32_zext = zext i8 %val8 to i32
  %res32_zext = add i32 %lhs32, %rhs32_zext
  store volatile i32 %res32_zext, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb

  %rhs32_zext_shift = shl i32 %rhs32_zext, 3
  %res32_zext_shift = add i32 %lhs32, %rhs32_zext_shift
  store volatile i32 %res32_zext_shift, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb #3
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb #3

; Zero-extending to 64-bits
  %rhs64_zext = zext i8 %val8 to i64
  %res64_zext = add i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb

  %rhs64_zext_shift = shl i64 %rhs64_zext, 1
  %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb #1
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb #1

; Sign-extending to 32-bits
  %rhs32_sext = sext i8 %val8 to i32
  %res32_sext = add i32 %lhs32, %rhs32_sext
  store volatile i32 %res32_sext, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb

  %rhs32_sext_shift = shl i32 %rhs32_sext, 1
  %res32_sext_shift = add i32 %lhs32, %rhs32_sext_shift
  store volatile i32 %res32_sext_shift, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb #1

; Sign-extending to 64-bits
  %rhs64_sext = sext i8 %val8 to i64
  %res64_sext = add i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb

  %rhs64_sext_shift = shl i64 %rhs64_sext, 4
  %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb #4
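
; The extending operand should also fold into a cmp (GlobalISel does not do
; this yet; see the FIXME at the top of the file).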
  %tst = icmp slt i32 %lhs32, %rhs32_zext
  br i1 %tst, label %end, label %test2
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxtb

test2:
  %cmp_sext = sext i8 %val8 to i64
  %tst2 = icmp eq i64 %lhs64, %cmp_sext
  br i1 %tst2, label %other, label %end
; CHECK: cmp {{x[0-9]+}}, {{w[0-9]+}}, sxtb

other:
  store volatile i32 %lhs32, i32* @var32
  ret void

end:
  ret void
}

define void @sub_i8rhs() minsize {
; CHECK-LABEL: sub_i8rhs:
  %val8_tmp = load i8, i8* @var8
  %lhs32 = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64

; Need this to prevent extension upon load and give a vanilla i8 operand.
  %val8 = add i8 %val8_tmp, 123

; Zero-extending to 32-bits
  %rhs32_zext = zext i8 %val8 to i32
  %res32_zext = sub i32 %lhs32, %rhs32_zext
  store volatile i32 %res32_zext, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb

  %rhs32_zext_shift = shl i32 %rhs32_zext, 3
  %res32_zext_shift = sub i32 %lhs32, %rhs32_zext_shift
  store volatile i32 %res32_zext_shift, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb #3
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb #3

; Zero-extending to 64-bits
  %rhs64_zext = zext i8 %val8 to i64
  %res64_zext = sub i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb

  %rhs64_zext_shift = shl i64 %rhs64_zext, 1
  %res64_zext_shift = sub i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb #1
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb #1

; Sign-extending to 32-bits
  %rhs32_sext = sext i8 %val8 to i32
  %res32_sext = sub i32 %lhs32, %rhs32_sext
  store volatile i32 %res32_sext, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb

  %rhs32_sext_shift = shl i32 %rhs32_sext, 1
  %res32_sext_shift = sub i32 %lhs32, %rhs32_sext_shift
  store volatile i32 %res32_sext_shift, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb #1

; Sign-extending to 64-bits
  %rhs64_sext = sext i8 %val8 to i64
  %res64_sext = sub i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb

  %rhs64_sext_shift = shl i64 %rhs64_sext, 4
  %res64_sext_shift = sub i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb #4
  ret void
}

define void @addsub_i16rhs() minsize {
; CHECK-LABEL: addsub_i16rhs:
; GISEL-LABEL: addsub_i16rhs:
  %val16_tmp = load i16, i16* @var16
  %lhs32 = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64

; Need this to prevent extension upon load and give a vanilla i16 operand.
  %val16 = add i16 %val16_tmp, 123

; Zero-extending to 32-bits
  %rhs32_zext = zext i16 %val16 to i32
  %res32_zext = add i32 %lhs32, %rhs32_zext
  store volatile i32 %res32_zext, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth

  %rhs32_zext_shift = shl i32 %rhs32_zext, 3
  %res32_zext_shift = add i32 %lhs32, %rhs32_zext_shift
  store volatile i32 %res32_zext_shift, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth #3
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth #3

; Zero-extending to 64-bits
  %rhs64_zext = zext i16 %val16 to i64
  %res64_zext = add i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth

  %rhs64_zext_shift = shl i64 %rhs64_zext, 1
  %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth #1
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth #1

; Sign-extending to 32-bits
  %rhs32_sext = sext i16 %val16 to i32
  %res32_sext = add i32 %lhs32, %rhs32_sext
  store volatile i32 %res32_sext, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth

  %rhs32_sext_shift = shl i32 %rhs32_sext, 1
  %res32_sext_shift = add i32 %lhs32, %rhs32_sext_shift
  store volatile i32 %res32_sext_shift, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth #1

; Sign-extending to 64-bits
  %rhs64_sext = sext i16 %val16 to i64
  %res64_sext = add i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth

  %rhs64_sext_shift = shl i64 %rhs64_sext, 4
  %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth #4
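
; The extending operand should also fold into a cmp (GlobalISel does not do
; this yet; see the FIXME at the top of the file).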
  %tst = icmp slt i32 %lhs32, %rhs32_zext
  br i1 %tst, label %end, label %test2
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxth

test2:
  %cmp_sext = sext i16 %val16 to i64
  %tst2 = icmp eq i64 %lhs64, %cmp_sext
  br i1 %tst2, label %other, label %end
; CHECK: cmp {{x[0-9]+}}, {{w[0-9]+}}, sxth

other:
  store volatile i32 %lhs32, i32* @var32
  ret void

end:
  ret void
}

define void @sub_i16rhs() minsize {
; CHECK-LABEL: sub_i16rhs:
; GISEL-LABEL: sub_i16rhs:
  %val16_tmp = load i16, i16* @var16
  %lhs32 = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64

; Need this to prevent extension upon load and give a vanilla i16 operand.
  %val16 = add i16 %val16_tmp, 123

; Zero-extending to 32-bits
  %rhs32_zext = zext i16 %val16 to i32
  %res32_zext = sub i32 %lhs32, %rhs32_zext
  store volatile i32 %res32_zext, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth

  %rhs32_zext_shift = shl i32 %rhs32_zext, 3
  %res32_zext_shift = sub i32 %lhs32, %rhs32_zext_shift
  store volatile i32 %res32_zext_shift, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth #3
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth #3

; Zero-extending to 64-bits
  %rhs64_zext = zext i16 %val16 to i64
  %res64_zext = sub i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth

  %rhs64_zext_shift = shl i64 %rhs64_zext, 1
  %res64_zext_shift = sub i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth #1
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth #1

; Sign-extending to 32-bits
  %rhs32_sext = sext i16 %val16 to i32
  %res32_sext = sub i32 %lhs32, %rhs32_sext
  store volatile i32 %res32_sext, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth

  %rhs32_sext_shift = shl i32 %rhs32_sext, 1
  %res32_sext_shift = sub i32 %lhs32, %rhs32_sext_shift
  store volatile i32 %res32_sext_shift, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth #1

; Sign-extending to 64-bits
  %rhs64_sext = sext i16 %val16 to i64
  %res64_sext = sub i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth

  %rhs64_sext_shift = shl i64 %rhs64_sext, 4
  %res64_sext_shift = sub i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth #4
  ret void
}

; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
; example), but the remaining instructions are probably not idiomatic
; in the face of "add/sub (shifted register)", so I don't intend to check them.
define void @addsub_i32rhs(i32 %in32) minsize {
; CHECK-LABEL: addsub_i32rhs:
; GISEL-LABEL: addsub_i32rhs:
  %val32_tmp = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64
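
; As in the i8/i16 tests above, add a constant so the value being extended is
; not simply the raw load result.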
  %val32 = add i32 %val32_tmp, 123
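
; Zero-extending to 64-bits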
  %rhs64_zext = zext i32 %in32 to i64
  %res64_zext = add i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw

  %rhs64_zext2 = zext i32 %val32 to i64
  %rhs64_zext_shift = shl i64 %rhs64_zext2, 2
  %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw #2
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw #2
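
; Sign-extending to 64-bits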
  %rhs64_sext = sext i32 %val32 to i64
  %res64_sext = add i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw

  %rhs64_sext_shift = shl i64 %rhs64_sext, 2
  %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw #2
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw #2

  ret void
}

define void @sub_i32rhs(i32 %in32) minsize {
; CHECK-LABEL: sub_i32rhs:
  %val32_tmp = load i32, i32* @var32
  %lhs64 = load i64, i64* @var64
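
; As in the i8/i16 tests above, add a constant so the value being extended is
; not simply the raw load result.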
  %val32 = add i32 %val32_tmp, 123
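
; Zero-extending to 64-bits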
  %rhs64_zext = zext i32 %in32 to i64
  %res64_zext = sub i64 %lhs64, %rhs64_zext
  store volatile i64 %res64_zext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw

  %rhs64_zext2 = zext i32 %val32 to i64
  %rhs64_zext_shift = shl i64 %rhs64_zext2, 2
  %res64_zext_shift = sub i64 %lhs64, %rhs64_zext_shift
  store volatile i64 %res64_zext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw #2
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw #2
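
; Sign-extending to 64-bits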
  %rhs64_sext = sext i32 %val32 to i64
  %res64_sext = sub i64 %lhs64, %rhs64_sext
  store volatile i64 %res64_sext, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw

  %rhs64_sext_shift = shl i64 %rhs64_sext, 2
  %res64_sext_shift = sub i64 %lhs64, %rhs64_sext_shift
  store volatile i64 %res64_sext_shift, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw #2

  ret void
}

; Check that implicit zext from w reg write is used instead of uxtw form of add.
define i64 @add_fold_uxtw(i32 %x, i64 %y) {
; CHECK-LABEL: add_fold_uxtw:
; GISEL-LABEL: add_fold_uxtw:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
; GISEL: and w[[TMP:[0-9]+]], w0, #0x3
; FIXME: Global ISel produces an unnecessary ubfx here.
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; CHECK-NEXT: add x0, x1, x[[TMP]]
; GISEL: add x0, x1, x[[TMP]]
  %ret = add i64 %y, %ext
  ret i64 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw
; form of sub and that mov WZR is folded to form a neg instruction.
define i64 @sub_fold_uxtw_xzr(i32 %x) {
; CHECK-LABEL: sub_fold_uxtw_xzr:
; GISEL-LABEL: sub_fold_uxtw_xzr:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
; GISEL: and w[[TMP:[0-9]+]], w0, #0x3
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; CHECK-NEXT: neg x0, x[[TMP]]
; GISEL: negs x0, x[[TMP]]
  %ret = sub i64 0, %ext
  ret i64 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw form of subs/cmp.
define i1 @cmp_fold_uxtw(i32 %x, i64 %y) {
; CHECK-LABEL: cmp_fold_uxtw:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; CHECK-NEXT: cmp x1, x[[TMP]]
  %ret = icmp eq i64 %y, %ext
  ret i1 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw
; form of add, leading to madd selection.
define i64 @madd_fold_uxtw(i32 %x, i64 %y) {
; CHECK-LABEL: madd_fold_uxtw:
; GISEL-LABEL: madd_fold_uxtw:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
; GISEL: and w[[TMP:[0-9]+]], w0, #0x3
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; GISEL: madd x0, x1, x1, x[[TMP]]
; CHECK-NEXT: madd x0, x1, x1, x[[TMP]]
  %mul = mul i64 %y, %y
  %ret = add i64 %mul, %ext
  ret i64 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw
; form of sub, leading to sub/cmp folding.
define i1 @cmp_sub_fold_uxtw(i32 %x, i64 %y, i64 %z) {
; CHECK-LABEL: cmp_sub_fold_uxtw:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; CHECK-NEXT: cmp x[[TMP2:[0-9]+]], x[[TMP]]
  %sub = sub i64 %z, %ext
  %ret = icmp eq i64 %sub, 0
  ret i1 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw
; form of add, and that an add of -1 is selected as a sub.
define i64 @add_imm_fold_uxtw(i32 %x) {
; CHECK-LABEL: add_imm_fold_uxtw:
; GISEL-LABEL: add_imm_fold_uxtw:
entry:
; CHECK: and w[[TMP:[0-9]+]], w0, #0x3
; GISEL: and w[[TMP:[0-9]+]], w0, #0x3
  %m = and i32 %x, 3
  %ext = zext i32 %m to i64
; CHECK-NEXT: sub x0, x[[TMP]], #1
; GISEL: subs x0, x[[TMP]], #1
  %ret = add i64 %ext, -1
  ret i64 %ret
}

; Check that implicit zext from w reg write is used instead of uxtw
; form of add, and that the add (shifted register) form is selected.
define i64 @add_lsl_fold_uxtw(i32 %x, i64 %y) {
; CHECK-LABEL: add_lsl_fold_uxtw:
; GISEL-LABEL: add_lsl_fold_uxtw:
entry:
; CHECK: orr w[[TMP:[0-9]+]], w0, #0x3
; GISEL: orr w[[TMP:[0-9]+]], w0, #0x3
  %m = or i32 %x, 3
  %ext = zext i32 %m to i64
  %shift = shl i64 %y, 3
; CHECK-NEXT: add x0, x[[TMP]], x1, lsl #3
; GISEL: add x0, x[[TMP]], x1, lsl #3
  %ret = add i64 %ext, %shift
  ret i64 %ret
}