; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86
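
; Note: -verify-machineinstrs makes llc run the machine verifier over the
; code between passes, so a malformed instruction (such as the stray
; EXTRACT_SUBREG this test guards against) is diagnosed near the pass that
; created it instead of crashing later.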
; This test is targeted at 64-bit mode. It used to crash due to the creation of an EXTRACT_SUBREG after the peephole pass had run.
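; A rough sketch of the shape involved, assuming virtual registers and the
; x86 sub_8bit subregister index (illustrative only; not checked below):
;   %wide:gr64 = ...
;   %low:gr8 = EXTRACT_SUBREG %wide, %subreg.sub_8bit
; The i66 value in the function legalizes to more than one register, and its
; narrow i1/i8 uses appear to be what forced such an extraction.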
define void @f1() {
; X64-LABEL: f1:
; X64:       # %bb.0: # %BB
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    testb %al, %al
; X64-NEXT:    setne (%rax)
; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT:    movq %rax, (%rax)
; X64-NEXT:    movb $0, (%rax)
; X64-NEXT:    retq
;
; X86-LABEL: f1:
; X86:       # %bb.0: # %BB
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    testb %al, %al
; X86-NEXT:    setne (%eax)
; X86-NEXT:    leal -{{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%eax)
; X86-NEXT:    movb $0, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
BB:
  %A30 = alloca i66
  %L17 = load i66, i66* %A30
  %B20 = and i66 %L17, -1
  %G2 = getelementptr i66, i66* %A30, i1 true
  %L10 = load volatile i8, i8* undef
  %L11 = load volatile i8, i8* undef
  %B6 = udiv i8 %L10, %L11
  %C15 = icmp eq i8 undef, 0
  %B8 = srem i66 0, %B20
  %C2 = icmp ule i66 %B8, %B20
  ; Assumed %B5 definition (the original was not preserved); any i8 value
  ; works here since %B5 only feeds an icmp against undef below.
  %B5 = udiv i8 %L11, %L10
  %C19 = icmp uge i1 false, %C2
  %C1 = icmp sle i8 undef, %B5
  %B37 = srem i1 %C1, %C2
  %C7 = icmp uge i1 false, %C15
  store i1 %C7, i1* undef
  %G6 = getelementptr i66, i66* %G2, i1 %B37
  store i66* %G6, i66** undef
  %B30 = srem i1 %C19, %C7
  store i1 %B30, i1* undef
  ret void
}

; Similar to the above, but with the bitwidth adjusted to target 32-bit mode. This also shows that we didn't constrain the register class when extracting a subreg.
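; Sketch of the hazard, with hypothetical register names: in 32-bit mode only
; %eax/%ebx/%ecx/%edx have 8-bit subregisters, so
;   %low:gr8 = EXTRACT_SUBREG %src:gr32, %subreg.sub_8bit
; is only well-formed once %src has been constrained to a class that supports
; sub_8bit (such as GR32_ABCD), e.g. via MachineRegisterInfo::constrainRegClass.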
define void @f2() {
; X64-LABEL: f2:
; X64:       # %bb.0: # %BB
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    movb (%rax), %al
; X64-NEXT:    testb %al, %al
; X64-NEXT:    setne (%rax)
; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT:    movq %rax, (%rax)
; X64-NEXT:    movb $0, (%rax)
; X64-NEXT:    retq
;
; X86-LABEL: f2:
; X86:       # %bb.0: # %BB
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    testb %al, %al
; X86-NEXT:    setne (%eax)
; X86-NEXT:    leal -{{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%eax)
; X86-NEXT:    movb $0, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
BB:
  %A30 = alloca i34
  %L17 = load i34, i34* %A30
  %B20 = and i34 %L17, -1
  %G2 = getelementptr i34, i34* %A30, i1 true
  %L10 = load volatile i8, i8* undef
  %L11 = load volatile i8, i8* undef
  %B6 = udiv i8 %L10, %L11
  %C15 = icmp eq i8 undef, 0
  %B8 = srem i34 0, %B20
  %C2 = icmp ule i34 %B8, %B20
  ; Assumed %B5 definition (the original was not preserved); any i8 value
  ; works here since %B5 only feeds an icmp against undef below.
  %B5 = udiv i8 %L11, %L10
  %C19 = icmp uge i1 false, %C2
  %C1 = icmp sle i8 undef, %B5
  %B37 = srem i1 %C1, %C2
  %C7 = icmp uge i1 false, %C15
  store i1 %C7, i1* undef
  %G6 = getelementptr i34, i34* %G2, i1 %B37
  store i34* %G6, i34** undef
  %B30 = srem i1 %C19, %C7
  store i1 %B30, i1* undef
  ret void
}