; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-pc-linux"

%0 = type <{ i64, i64, %1, %1, [21 x %2] }>
%1 = type <{ i64, i64, i64 }>
%2 = type <{ i32, i32, i8 addrspace(2)* }>
%3 = type { i8*, i8*, i8*, i8*, i32 }
%4 = type <{ %5*, i8*, i32, i32, [4 x i64], [4 x i64], [4 x i64], [4 x i64], [4 x i64] }>
%5 = type <{ void (i32)*, i8*, i32 (i8*, ...)* }>

define void @foo(i8* nocapture %_stubArgs) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq 48(%rdi), %rax
; CHECK-NEXT: movl 64(%rdi), %edx
; CHECK-NEXT: movl $200, %esi
; CHECK-NEXT: addl 68(%rdi), %esi
; CHECK-NEXT: imull $46, %edx, %ecx
; CHECK-NEXT: addq %rsi, %rcx
; CHECK-NEXT: shlq $4, %rcx
; CHECK-NEXT: imull $47, %edx, %edx
; CHECK-NEXT: addq %rsi, %rdx
; CHECK-NEXT: shlq $4, %rdx
; CHECK-NEXT: movaps (%rax,%rdx), %xmm0
; CHECK-NEXT: cmpl $0, (%rdi)
; CHECK-NEXT: jne .LBB0_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: jmp .LBB0_3
; CHECK-NEXT: .LBB0_1:
; CHECK-NEXT: movaps (%rax,%rcx), %xmm1
; CHECK-NEXT: .LBB0_3: # %entry
; CHECK-NEXT: leaq -{{[0-9]+}}(%rsp), %rsp
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: jne .LBB0_5
; CHECK-NEXT: # %bb.4: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: .LBB0_5: # %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: addq $152, %rsp
entry:
  %i0 = alloca i8*, align 8
  %i2 = alloca i8*, align 8
  %b.i = alloca [16 x <2 x double>], align 16
  %conv = bitcast i8* %_stubArgs to i32*
  %tmp1 = load i32, i32* %conv, align 4
  %ptr8 = getelementptr i8, i8* %_stubArgs, i64 16
  %i4 = bitcast i8* %ptr8 to <2 x double>*
  %ptr20 = getelementptr i8, i8* %_stubArgs, i64 48
  %i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
  %tmp21 = load <2 x double> addrspace(1)*, <2 x double> addrspace(1)** %i7, align 8
  %ptr28 = getelementptr i8, i8* %_stubArgs, i64 64
  %i9 = bitcast i8* %ptr28 to i32*
  %tmp29 = load i32, i32* %i9, align 4
  %ptr32 = getelementptr i8, i8* %_stubArgs, i64 68
  %i10 = bitcast i8* %ptr32 to i32*
  %tmp33 = load i32, i32* %i10, align 4
  %tmp17.i = mul i32 10, 20
  %tmp19.i = add i32 %tmp17.i, %tmp33
  %conv21.i = zext i32 %tmp19.i to i64
  %tmp6.i = and i32 42, -32
  %tmp42.i = add i32 %tmp6.i, 17
  %tmp44.i = insertelement <2 x i32> undef, i32 %tmp42.i, i32 1
  %tmp96676677.i = or i32 17, -4
  %ptr4438.i = getelementptr inbounds [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 0
  %arrayidx4506.i = getelementptr [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 4
  %tmp52.i = insertelement <2 x i32> %tmp44.i, i32 0, i32 0
  %tmp78.i = extractelement <2 x i32> %tmp44.i, i32 1
  %tmp97.i = add i32 %tmp78.i, %tmp96676677.i
  %tmp99.i = insertelement <2 x i32> %tmp52.i, i32 %tmp97.i, i32 1
  %tmp154.i = extractelement <2 x i32> %tmp99.i, i32 1
  %tmp156.i = extractelement <2 x i32> %tmp52.i, i32 0
  %tmp158.i = urem i32 %tmp156.i, %tmp1
  %i38 = mul i32 %tmp154.i, %tmp29
  %i39 = add i32 %tmp158.i, %i38
  %conv160.i = zext i32 %i39 to i64
  %tmp22.sum652.i = add i64 %conv160.i, %conv21.i
  %arrayidx161.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
  %tmp162.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx161.i, align 16
  %tmp222.i = add i32 %tmp154.i, 1
  %i43 = mul i32 %tmp222.i, %tmp29
  %i44 = add i32 %tmp158.i, %i43
  %conv228.i = zext i32 %i44 to i64
  %tmp22.sum656.i = add i64 %conv228.i, %conv21.i
  %arrayidx229.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
  %tmp230.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx229.i, align 16
  %cmp432.i = icmp ult i32 %tmp156.i, %tmp1

; %shl.i should not be sunk below the compare.
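; (A shl sunk past the cmpl would clobber EFLAGS, which the second jne in the
; checked output above still relies on.)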
  %cond.i = select i1 %cmp432.i, <2 x double> %tmp162.i, <2 x double> zeroinitializer
  store <2 x double> %cond.i, <2 x double>* %ptr4438.i, align 16
  %cond448.i = select i1 %cmp432.i, <2 x double> %tmp230.i, <2 x double> zeroinitializer
  store <2 x double> %cond448.i, <2 x double>* %arrayidx4506.i, align 16
  ret void
}