; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64

; Check for assert in foldMaskAndShiftToScale due to out of range mask scaling.
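; foldMaskAndShiftToScale (X86ISelDAGToDAG.cpp) tries to rewrite a pattern of
; the form (and (srl X, C), Mask) so that part of the shift becomes the scale
; of an x86 addressing mode. A minimal sketch of the shape involved, using
; hypothetical names %x/%shifted/%masked (illustrative only, not part of this
; test):
;
;   %shifted = lshr i64 %x, 21
;   %masked = and i64 %shifted, 9223372036854775806
;
; x86 addressing modes only support scales of 1, 2, 4, or 8; a mask/shift
; combination like the one in the function below can imply a scale outside
; that range, which is what used to hit the assert.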

@b = common global i8 zeroinitializer, align 1
@c = common global i8 zeroinitializer, align 1
@d = common global i64 zeroinitializer, align 8
@e = common global i64 zeroinitializer, align 8

define void @foo(i64 %x) nounwind {
; X86-LABEL: foo:
; X86:       # %bb.0:
; X86-NEXT:    pushl %eax
; X86-NEXT:    movl d+4, %eax
; X86-NEXT:    notl %eax
; X86-NEXT:    movl d, %ecx
; X86-NEXT:    notl %ecx
; X86-NEXT:    andl $-566231040, %ecx # imm = 0xDE400000
; X86-NEXT:    andl $701685459, %eax # imm = 0x29D2DED3
; X86-NEXT:    shrdl $21, %eax, %ecx
; X86-NEXT:    shrl $21, %eax
; X86-NEXT:    addl $7, %ecx
; X86-NEXT:    adcl $0, %eax
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __divdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    orl %eax, %edx
; X86-NEXT:    setne {{[0-9]+}}(%esp)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movq {{.*}}(%rip), %rcx
; X64-NEXT:    movabsq $3013716102212485120, %rdx # imm = 0x29D2DED3DE400000
; X64-NEXT:    andnq %rdx, %rcx, %rcx
; X64-NEXT:    shrq $21, %rcx
; X64-NEXT:    addq $7, %rcx
; X64-NEXT:    movq %rdi, %rdx
; X64-NEXT:    orq %rcx, %rdx
; X64-NEXT:    shrq $32, %rdx
; X64-NEXT:    je .LBB0_1
; X64-NEXT:  # %bb.2:
; X64-NEXT:    cqto
; X64-NEXT:    idivq %rcx
; X64-NEXT:    jmp .LBB0_3
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:  .LBB0_3:
; X64-NEXT:    testq %rax, %rax
; X64-NEXT:    setne -{{[0-9]+}}(%rsp)
; X64-NEXT:    retq
  %1 = alloca i8, align 1
  %2 = load i64, i64* @d, align 8
  %3 = or i64 -3013716102214263007, %2
  %4 = xor i64 %3, -1
  %5 = load i64, i64* @e, align 8
  %6 = load i8, i8* @b, align 1
  %7 = trunc i8 %6 to i1
  %8 = zext i1 %7 to i64
  %10 = load i8, i8* @c, align 1
  %11 = trunc i8 %10 to i1
  %12 = zext i1 %11 to i32
  %13 = or i32 551409149, %12
  %14 = sub nsw i32 %13, 551409131
  %15 = zext i32 %14 to i64
  %17 = sub nsw i64 %16, 223084523
  %18 = ashr i64 %4, %17
  %19 = and i64 %18, 9223372036854775806
  %20 = add nsw i64 7, %19
  %21 = sdiv i64 %x, %20
  %22 = icmp ne i64 %21, 0
  %23 = zext i1 %22 to i8
  store i8 %23, i8* %1, align 1
  ret void
}