; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; https://bugs.llvm.org/show_bug.cgi?id=38149

; We are truncating from a wider width, and then sign-extending
; back to the original width. Then we compare the original value with the
; sign-extended one for equality. If they do not match, the value did not
; survive the truncation with its sign intact, i.e. it does not fit in the
; narrower signed type.
;
; This can be expressed in several ways in IR:
;   trunc + sext + icmp eq <- not canonical
;   shl   + ashr + icmp eq
;   add          + icmp uge
;   add          + icmp ult
; However, only the simplest form (with the two shifts) is lowered best.
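
; For reference, an illustrative sketch (names are placeholders, and this
; snippet is not exercised by the test) of the non-canonical trunc + sext
; form for the i16 -> i8 case; the shl/ashr pairs used below are its
; canonical equivalent:
;   %t = trunc i16 %x to i8
;   %s = sext i8 %t to i16
;   %r = icmp eq i16 %s, %x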

; ---------------------------------------------------------------------------- ;
; shl + ashr + icmp eq
; ---------------------------------------------------------------------------- ;

define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb w8, w0
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w0, uxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i16 %x, 8 ; 16-8
  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
  %tmp2 = icmp eq i16 %tmp1, %x
  ret i1 %tmp2
}

define i1 @shifts_eqcmp_i32_i16(i32 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i32_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i32 %x, 16 ; 32-16
  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
  %tmp2 = icmp eq i32 %tmp1, %x
  ret i1 %tmp2
}

define i1 @shifts_eqcmp_i32_i8(i32 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i32_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i32 %x, 24 ; 32-8
  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
  %tmp2 = icmp eq i32 %tmp1, %x
  ret i1 %tmp2
}

define i1 @shifts_eqcmp_i64_i32(i64 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtw
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i64 %x, 32 ; 64-32
  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
  %tmp2 = icmp eq i64 %tmp1, %x
  ret i1 %tmp2
}

define i1 @shifts_eqcmp_i64_i16(i64 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i64_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i64 %x, 48 ; 64-16
  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
  %tmp2 = icmp eq i64 %tmp1, %x
  ret i1 %tmp2
}

define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
; CHECK-LABEL: shifts_eqcmp_i64_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = shl i64 %x, 56 ; 64-8
  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
  %tmp2 = icmp eq i64 %tmp1, %x
  ret i1 %tmp2
}

; ---------------------------------------------------------------------------- ;
; add + icmp uge
; ---------------------------------------------------------------------------- ;
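
; A sketch of the reasoning for the i16 -> i8 case (the other widths are
; analogous): %x sign-extends from i8 iff %x is in [-128, 127], iff
; %x + (-128) lands in [-256, -1], which as an unsigned i16 range is
; [0xff00, 0xffff], i.e. iff (%x - 128) is uge -256.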

define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb w8, w0
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w0, uxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
  %tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
  ret i1 %tmp1
}

define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i32_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
  %tmp1 = icmp uge i32 %tmp0, -65536 ; ~0U << 16
  ret i1 %tmp1
}

define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i32_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
  %tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
  ret i1 %tmp1
}

define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtw
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
  %tmp1 = icmp uge i64 %tmp0, -4294967296 ; ~0U << 32
  ret i1 %tmp1
}

define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i64_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
  %tmp1 = icmp uge i64 %tmp0, -65536 ; ~0U << 16
  ret i1 %tmp1
}

define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
; CHECK-LABEL: add_ugecmp_i64_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
  %tmp1 = icmp uge i64 %tmp0, -256 ; ~0U << 8
  ret i1 %tmp1
}

; Slightly more canonical variant
define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ugtcmp_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb w8, w0
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w0, uxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
  %tmp1 = icmp ugt i16 %tmp0, -257 ; (~0U << 8) - 1
  ret i1 %tmp1
}

; ---------------------------------------------------------------------------- ;
; add + icmp ult
; ---------------------------------------------------------------------------- ;
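
; A sketch of the reasoning for the i16 -> i8 case: %x sign-extends from i8
; iff %x is in [-128, 127], iff %x + 128 is in [0, 255], i.e. iff
; (%x + 128) is ult 256.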

define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb w8, w0
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w0, uxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i32_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
  %tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
  ret i1 %tmp1
}

define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i32_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtw
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
  %tmp1 = icmp ult i64 %tmp0, 4294967296 ; 1U << 32
  ret i1 %tmp1
}

define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i64_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
  %tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
  ret i1 %tmp1
}

define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
; CHECK-LABEL: add_ultcmp_i64_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, w0, sxtb
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

; Slightly more canonical variant
define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ulecmp_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb w8, w0
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w0, uxth
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
  ret i1 %tmp1
}

; Negative tests
; ---------------------------------------------------------------------------- ;

; The addend is not a constant
define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, w1
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, #256 // =256
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, %y
  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

; The comparison is not against a constant
define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_cmp:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #128 // =128
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, w1, uxth
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i16 %tmp0, %y
  ret i1 %tmp1
}

; Second constant is not larger than the first one
define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i8_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xffff
; CHECK-NEXT:    add w8, w8, #128 // =128
; CHECK-NEXT:    lsr w0, w8, #16
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i16 %tmp0, 128 ; 1U << (8-1)
  ret i1 %tmp1
}

; First constant is not a power of two
define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #192 // =192
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, #256 // =256
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

; Second constant is not a power of two
define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #128 // =128
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, #768 // =768
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i16 %tmp0, 768 ; (1U << 8) + (1U << (8+1))
  ret i1 %tmp1
}

; Magic check fails, 64 << 1 != 256
define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #64 // =64
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, #256 // =256
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

; Bad 'destination type'
define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #8 // =8
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    cmp w8, #16 // =16
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 8 ; 1U << (4-1)
  %tmp1 = icmp ult i16 %tmp0, 16 ; 1U << 4
  ret i1 %tmp1
}

; Bad storage type
define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i24_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #128 // =128
; CHECK-NEXT:    and w8, w8, #0xffffff
; CHECK-NEXT:    cmp w8, #256 // =256
; CHECK-NEXT:    cset w0, lo
; CHECK-NEXT:    ret
  %tmp0 = add i24 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
  ret i1 %tmp1
}

define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ulecmp_bad_i16_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #1
; CHECK-NEXT:    ret
  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
  %tmp1 = icmp ule i16 %tmp0, -1 ; adding 1 to -1 wraps to 0, so this compare is always true
  ret i1 %tmp1
}