; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86

; This test is targeted at 64-bit mode. It used to crash due to the creation of an EXTRACT_SUBREG after the peephole pass had run.
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    cmpb $0, (%rax)
; X64-NEXT:    setne (%rax)
; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT:    movq %rax, (%rax)
; X64-NEXT:    movb $0, (%rax)
; X64-NEXT:    retq
;
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    cmpb $0, (%eax)
; X86-NEXT:    setne (%eax)
; X86-NEXT:    leal -{{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%eax)
; X86-NEXT:    movb $0, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
  %A30 = alloca i66
  %L17 = load i66, i66* %A30
  %B20 = and i66 %L17, -1
  %G2 = getelementptr i66, i66* %A30, i1 true
  %L10 = load volatile i8, i8* undef
  %L11 = load volatile i8, i8* undef
  %B6 = udiv i8 %L10, %L11
  %C15 = icmp eq i8 %L11, 0
  %B8 = srem i66 0, %B20
  %C2 = icmp ule i66 %B8, %B20
  %C19 = icmp uge i1 false, %C2
  %C1 = icmp sle i8 undef, %B5
  %B37 = srem i1 %C1, %C2
  %C7 = icmp uge i1 false, %C15
  store i1 %C7, i1* undef
  %G6 = getelementptr i66, i66* %G2, i1 %B37
  store i66* %G6, i66** undef
  %B30 = srem i1 %C19, %C7
  store i1 %B30, i1* undef
  ret void
}

; Similar to the above, but with the bitwidth adjusted to target 32-bit mode. This also shows that we didn't constrain the register class when extracting a subreg.
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    cmpb $0, (%rax)
; X64-NEXT:    setne (%rax)
; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT:    movq %rax, (%rax)
; X64-NEXT:    movb $0, (%rax)
; X64-NEXT:    retq
;
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    cmpb $0, (%eax)
; X86-NEXT:    setne (%eax)
; X86-NEXT:    leal -{{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%eax)
; X86-NEXT:    movb $0, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
  %A30 = alloca i34
  %L17 = load i34, i34* %A30
  %B20 = and i34 %L17, -1
  %G2 = getelementptr i34, i34* %A30, i1 true
  %L10 = load volatile i8, i8* undef
  %L11 = load volatile i8, i8* undef
  %B6 = udiv i8 %L10, %L11
  %C15 = icmp eq i8 %L11, 0
  %B8 = srem i34 0, %B20
  %C2 = icmp ule i34 %B8, %B20
  %C19 = icmp uge i1 false, %C2
  %C1 = icmp sle i8 undef, %B5
  %B37 = srem i1 %C1, %C2
  %C7 = icmp uge i1 false, %C15
  store i1 %C7, i1* undef
  %G6 = getelementptr i34, i34* %G2, i1 %B37
  store i34* %G6, i34** undef
  %B30 = srem i1 %C19, %C7
  store i1 %B30, i1* undef
  ret void
}