; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64

@a = common global i32 0, align 4
@b = common global i32 0, align 4
@c = common global i32 0, align 4
@e = common global i32 0, align 4
@x = common global i32 0, align 4
@f = common global i32 0, align 4
@h = common global i32 0, align 4
@i = common global i32 0, align 4
; Test -Os to make sure immediates with multiple users don't get pulled into
; instructions.
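; For example, the checks below expect 1234 to be materialized once
; (movl $1234, %eax) and the register reused for both stores to @a and @b,
; rather than the 4-byte immediate being encoded twice.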
define i32 @foo() optsize {
; X86-LABEL: foo:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl $1234, %eax # imm = 0x4D2
; X86-NEXT:    movl %eax, a
; X86-NEXT:    movl %eax, b
; X86-NEXT:    movl $12, %eax
; X86-NEXT:    movl %eax, c
; X86-NEXT:    cmpl %eax, e
; X86-NEXT:    jne .LBB0_2
; X86-NEXT:  # %bb.1: # %if.then
; X86-NEXT:    movl $1, x
; X86-NEXT:  .LBB0_2: # %if.end
; X86-NEXT:    movl $1234, f # imm = 0x4D2
; X86-NEXT:    movl $555, %eax # imm = 0x22B
; X86-NEXT:    movl %eax, h
; X86-NEXT:    addl %eax, i
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl $1234, %eax # imm = 0x4D2
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    movl $12, %eax
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    cmpl %eax, {{.*}}(%rip)
; X64-NEXT:    jne .LBB0_2
; X64-NEXT:  # %bb.1: # %if.then
; X64-NEXT:    movl $1, {{.*}}(%rip)
; X64-NEXT:  .LBB0_2: # %if.end
; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
; X64-NEXT:    movl $555, %eax # imm = 0x22B
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    addl %eax, {{.*}}(%rip)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    retq
entry:
  store i32 1234, i32* @a
  store i32 1234, i32* @b
  store i32 12, i32* @c
  %0 = load i32, i32* @e
  %cmp = icmp eq i32 %0, 12
  br i1 %cmp, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  store i32 1, i32* @x
  br label %if.end
; New block. Make sure 1234 isn't live across basic blocks from before.
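; The checks above expect a fresh movl $1234 for the store to @f here, rather
; than a register value kept alive from the entry block.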
if.end:                                           ; preds = %if.then, %entry
  store i32 1234, i32* @f
  store i32 555, i32* @h
  %1 = load i32, i32* @i
  %add1 = add nsw i32 %1, 555
  store i32 %add1, i32* @i
  ret i32 0
}
; Test -O2 to make sure that all immediates get pulled into their users.
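; For example, without optsize the checks below expect each store to encode
; $1234 directly (movl $1234, a) instead of staging it in a register first.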
define i32 @foo2() {
; X86-LABEL: foo2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl $1234, a # imm = 0x4D2
; X86-NEXT:    movl $1234, b # imm = 0x4D2
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    retl
;
; X64-LABEL: foo2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    retq
entry:
  store i32 1234, i32* @a
  store i32 1234, i32* @b
  ret i32 0
}
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1

@AA = common global [100 x i8] zeroinitializer, align 1
; memset gets lowered in DAG. Constant merging should hoist all the
; immediates used to store to the individual memory locations. Make
; sure we don't directly store the immediates.
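; The call below is equivalent to memset(AA, 0x21, 24): 24 bytes of 0x21
; merge into the patterns 0x21212121 (six 4-byte stores on X86) and
; 0x2121212121212121 (three 8-byte stores on X64).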
define void @foomemset() optsize {
; X86-LABEL: foomemset:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl $555819297, %eax # imm = 0x21212121
; X86-NEXT:    movl %eax, AA+20
; X86-NEXT:    movl %eax, AA+16
; X86-NEXT:    movl %eax, AA+12
; X86-NEXT:    movl %eax, AA+8
; X86-NEXT:    movl %eax, AA+4
; X86-NEXT:    movl %eax, AA
; X86-NEXT:    retl
;
; X64-LABEL: foomemset:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movabsq $2387225703656530209, %rax # imm = 0x2121212121212121
; X64-NEXT:    movq %rax, AA+{{.*}}(%rip)
; X64-NEXT:    movq %rax, AA+{{.*}}(%rip)
; X64-NEXT:    movq %rax, {{.*}}(%rip)
; X64-NEXT:    retq
entry:
  call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @AA, i32 0, i32 0), i8 33, i32 24, i1 false)
  ret void
}