; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s

target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32"
target triple = "i386-pc-windows-gnu"
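
; Note: the Windows (MinGW) triple is why the dynamic alloca below is lowered
; to a call to the __alloca stack-probe helper, as seen in the CHECK lines.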

; This function uses esi as the base pointer; the inline asm clobbers esi, so
; we should save esi using esp before the inline asm and restore esi after the
; inline asm.
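; (esi is needed as a base pointer here because the function both realigns the
; stack for the 16-byte-aligned alloca and has a variable-sized alloca, so
; locals cannot be addressed relative to esp or ebp alone.)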
define i32 @clobber_bp() {
; CHECK-LABEL: clobber_bp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: .cfi_def_cfa_register %ebp
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: .cfi_offset %esi, -16
; CHECK-NEXT: .cfi_offset %edi, -12
; CHECK-NEXT: movl $4, 12(%esi)
; CHECK-NEXT: movl 12(%esi), %eax
; CHECK-NEXT: addl $3, %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: calll __alloca
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: movl $1, (%eax)
; CHECK-NEXT: leal 8(%esi), %edi
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: #APP
; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
; CHECK-NEXT: movl 8(%esi), %eax
; CHECK-NEXT: leal -8(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
entry:
  %size = alloca i32, align 4
  %g = alloca i32, align 4
  store volatile i32 4, ptr %size, align 4
  %len = load volatile i32, ptr %size, align 4
  %var_array = alloca i8, i32 %len, align 16
  store i32 1, ptr %var_array, align 16
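  ; The asm does "rep movsb": inputs 0/1/2 are tied to the {di}/{si}/{cx}
  ; outputs, so %g lands in edi, %var_array in esi, and the count (4) in ecx;
  ; the base pointer esi is thus clobbered by the asm's own operands.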
  %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %g, ptr %var_array, i32 4)
  %retval = load i32, ptr %g, align 4
  ret i32 %retval
}

; This function has the same code, except that the inline asm also clobbers
; ebp.
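; (With ebp also clobbered, both ebp and esi are spilled around the asm, and
; the .cfi_escape below emits a DW_CFA_def_cfa_expression so the CFA can still
; be recovered through the saved ebp slot while ebp is unavailable.)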
define i32 @clobber_bpfp() {
; CHECK-LABEL: clobber_bpfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: .cfi_def_cfa_register %ebp
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: .cfi_offset %esi, -16
; CHECK-NEXT: .cfi_offset %edi, -12
; CHECK-NEXT: movl $4, 12(%esi)
; CHECK-NEXT: movl 12(%esi), %eax
; CHECK-NEXT: addl $3, %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: calll __alloca
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: movl $1, (%eax)
; CHECK-NEXT: leal 8(%esi), %edi
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x74, 0x04, 0x06, 0x11, 0x08, 0x22 #
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: #APP
; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: .cfi_restore_state
; CHECK-NEXT: movl 8(%esi), %eax
; CHECK-NEXT: leal -8(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
entry:
  %size = alloca i32, align 4
  %g = alloca i32, align 4
  store volatile i32 4, ptr %size, align 4
  %len = load volatile i32, ptr %size, align 4
  %var_array = alloca i8, i32 %len, align 16
  store i32 1, ptr %var_array, align 16
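  ; Same copy as above, but the constraint string also lists ~{ebp}, so the
  ; frame pointer is clobbered by the asm in addition to esi.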
  %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags},~{ebp}"(ptr %g, ptr %var_array, i32 4)
  %retval = load i32, ptr %g, align 4
  ret i32 %retval
}