; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-pc-linux"
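
; This test pins down the exact x86-64 sequence llc generates for @foo; the
; point of interest is that the shift feeding the vector-load addresses stays
; above the compare (see the comment before the selects at the end of the
; function).
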
%0 = type <{ i64, i64, %1, %1, [21 x %2] }>
%1 = type <{ i64, i64, i64 }>
%2 = type <{ i32, i32, i8 addrspace(2)* }>
%3 = type { i8*, i8*, i8*, i8*, i32 }
%4 = type <{ %5*, i8*, i32, i32, [4 x i64], [4 x i64], [4 x i64], [4 x i64], [4 x i64] }>
%5 = type <{ void (i32)*, i8*, i32 (i8*, ...)* }>
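; (None of the struct types above are referenced by @foo; they appear to be
; leftovers of the original test-case reduction.)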

define void @foo(i8* nocapture %_stubArgs) nounwind {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $152, %rsp
; CHECK-NEXT:    movq 48(%rdi), %rax
; CHECK-NEXT:    movl 64(%rdi), %edx
; CHECK-NEXT:    movl $200, %esi
; CHECK-NEXT:    addl 68(%rdi), %esi
; CHECK-NEXT:    imull $46, %edx, %ecx
; CHECK-NEXT:    addq %rsi, %rcx
; CHECK-NEXT:    shlq $4, %rcx
; CHECK-NEXT:    imull $47, %edx, %edx
; CHECK-NEXT:    addq %rsi, %rdx
; CHECK-NEXT:    shlq $4, %rdx
; CHECK-NEXT:    movaps (%rax,%rdx), %xmm0
; CHECK-NEXT:    cmpl $0, (%rdi)
; CHECK-NEXT:    jne .LBB0_1
; CHECK-NEXT:  # %bb.2: # %entry
; CHECK-NEXT:    xorps %xmm1, %xmm1
; CHECK-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    je .LBB0_4
; CHECK-NEXT:    jmp .LBB0_5
; CHECK-NEXT:  .LBB0_1:
; CHECK-NEXT:    movaps (%rax,%rcx), %xmm1
; CHECK-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    jne .LBB0_5
; CHECK-NEXT:  .LBB0_4: # %entry
; CHECK-NEXT:    xorps %xmm0, %xmm0
; CHECK-NEXT:  .LBB0_5: # %entry
; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    addq $152, %rsp
; CHECK-NEXT:    retq
entry:
%i0 = alloca i8*, align 8
%i2 = alloca i8*, align 8
%b.i = alloca [16 x <2 x double>], align 16
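; Unpack the kernel arguments from the raw byte buffer at fixed offsets: a
; bound at offset 0, an unused <2 x double>* view at 16, the grid base pointer
; at 48, and two i32 values at 64 and 68 that feed the index arithmetic.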
%conv = bitcast i8* %_stubArgs to i32*
%tmp1 = load i32, i32* %conv, align 4
%ptr8 = getelementptr i8, i8* %_stubArgs, i64 16
%i4 = bitcast i8* %ptr8 to <2 x double>*
%ptr20 = getelementptr i8, i8* %_stubArgs, i64 48
%i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
%tmp21 = load <2 x double> addrspace(1)*, <2 x double> addrspace(1)** %i7, align 8
%ptr28 = getelementptr i8, i8* %_stubArgs, i64 64
%i9 = bitcast i8* %ptr28 to i32*
%tmp29 = load i32, i32* %i9, align 4
%ptr32 = getelementptr i8, i8* %_stubArgs, i64 68
%i10 = bitcast i8* %ptr32 to i32*
%tmp33 = load i32, i32* %i10, align 4
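; At this point: %tmp1 is the bound tested below, %tmp21 the addrspace(1) base
; pointer, %tmp29 is used as the row stride, and %tmp33 as a flat base offset.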
%tmp17.i = mul i32 10, 20
%tmp19.i = add i32 %tmp17.i, %tmp33
%conv21.i = zext i32 %tmp19.i to i64
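; %conv21.i = 10 * 20 + %tmp33; in the CHECK lines this is the
; movl $200, %esi / addl 68(%rdi), %esi pair.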
%tmp6.i = and i32 42, -32
%tmp42.i = add i32 %tmp6.i, 17
%tmp44.i = insertelement <2 x i32> undef, i32 %tmp42.i, i32 1
%tmp96676677.i = or i32 17, -4
%ptr4438.i = getelementptr inbounds [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 0
%arrayidx4506.i = getelementptr [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 4
%tmp52.i = insertelement <2 x i32> %tmp44.i, i32 0, i32 0
%tmp78.i = extractelement <2 x i32> %tmp44.i, i32 1
%tmp97.i = add i32 %tmp78.i, %tmp96676677.i
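; Constant folding gives %tmp97.i = (42 & -32) + 17 + (17 | -4) = 46, which is
; the imull $46 in the CHECK lines; %tmp222.i below adds 1 for the imull $47.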
%tmp99.i = insertelement <2 x i32> %tmp52.i, i32 %tmp97.i, i32 1
%tmp154.i = extractelement <2 x i32> %tmp99.i, i32 1
%tmp156.i = extractelement <2 x i32> %tmp52.i, i32 0
%tmp158.i = urem i32 %tmp156.i, %tmp1
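; Lane 0 (%tmp156.i) folds to 0, so the urem folds to 0 as well and the bounds
; check below becomes a plain %tmp1 != 0 test: cmpl $0, (%rdi) / jne in the asm.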
%i38 = mul i32 %tmp154.i, %tmp29
%i39 = add i32 %tmp158.i, %i38
%conv160.i = zext i32 %i39 to i64
%tmp22.sum652.i = add i64 %conv160.i, %conv21.i
%arrayidx161.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
%tmp162.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx161.i, align 16
%tmp222.i = add i32 %tmp154.i, 1
%i43 = mul i32 %tmp222.i, %tmp29
%i44 = add i32 %tmp158.i, %i43
%conv228.i = zext i32 %i44 to i64
%tmp22.sum656.i = add i64 %conv228.i, %conv21.i
%arrayidx229.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
%tmp230.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx229.i, align 16
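; Two <2 x double> loads from consecutive rows (46 and 47) of the same grid.
; In the CHECK lines the row-47 load (movaps (%rax,%rdx)) is scheduled above
; the compare, while the row-46 load sits in the .LBB0_1 branch.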
%cmp432.i = icmp ult i32 %tmp156.i, %tmp1

; %shl.i should not be sunk below the compare.
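; (In the CHECK lines this shows up as both shlq $4 address computations being
; emitted before the cmpl, with each select below lowered to a compare-and-branch
; that zeroes the untaken value with xorps.)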
%cond.i = select i1 %cmp432.i, <2 x double> %tmp162.i, <2 x double> zeroinitializer
store <2 x double> %cond.i, <2 x double>* %ptr4438.i, align 16
%cond448.i = select i1 %cmp432.i, <2 x double> %tmp230.i, <2 x double> zeroinitializer
store <2 x double> %cond448.i, <2 x double>* %arrayidx4506.i, align 16
ret void
}