; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
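
; Only lane 0 of the <4 x float> operands is demanded by the scalar SSE
; intrinsics below, so the CHECK lines expect instcombine to drop the
; insertelements that fill lanes 1-3 and to lower sub.ss/mul.ss to scalar
; fadd/fmul on element 0.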
define i16 @test1(float %f) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[TMP1:%.*]] = fadd float [[F:%.*]], -1.000000e+00
; CHECK-NEXT: [[TMP2:%.*]] = fmul float [[TMP1]], 5.000000e-01
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> poison, float [[TMP2]], i32 0
; CHECK-NEXT: [[TMP48:%.*]] = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> [[TMP3]], <4 x float> <float 6.553500e+04, float poison, float poison, float poison>)
; CHECK-NEXT: [[TMP59:%.*]] = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> [[TMP48]], <4 x float> <float 0.000000e+00, float poison, float poison, float poison>)
; CHECK-NEXT: [[TMP_UPGRD_1:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[TMP59]])
; CHECK-NEXT: [[TMP69:%.*]] = trunc i32 [[TMP_UPGRD_1]] to i16
; CHECK-NEXT: ret i16 [[TMP69]]
;
  %tmp = insertelement <4 x float> undef, float %f, i32 0
  %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1
  %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
  %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
  %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
  %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
  %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
  %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer )
  %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )
  %tmp69 = trunc i32 %tmp.upgrd.1 to i16
  ret i16 %tmp69
}
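
; The cvt*2si / cvtt*2si intrinsics read only element 0 of their vector
; argument, so the CHECK lines expect the inserts of 0.0 into the upper lanes
; to be removed, leaving a single insertelement into poison per conversion.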
define i64 @test3(float %f, double %d) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[V03:%.*]] = insertelement <4 x float> poison, float [[F:%.*]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[V03]])
; CHECK-NEXT: [[V13:%.*]] = insertelement <4 x float> poison, float [[F]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> [[V13]])
; CHECK-NEXT: [[V23:%.*]] = insertelement <4 x float> poison, float [[F]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[V23]])
; CHECK-NEXT: [[V33:%.*]] = insertelement <4 x float> poison, float [[F]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> [[V33]])
; CHECK-NEXT: [[V41:%.*]] = insertelement <2 x double> poison, double [[D:%.*]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> [[V41]])
; CHECK-NEXT: [[V51:%.*]] = insertelement <2 x double> poison, double [[D]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> [[V51]])
; CHECK-NEXT: [[V61:%.*]] = insertelement <2 x double> poison, double [[D]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> [[V61]])
; CHECK-NEXT: [[V71:%.*]] = insertelement <2 x double> poison, double [[D]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> [[V71]])
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]]
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
; CHECK-NEXT: ret i64 [[TMP15]]
;
  %v00 = insertelement <4 x float> undef, float %f, i32 0
  %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1
  %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2
  %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3
  %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
  %v10 = insertelement <4 x float> undef, float %f, i32 0
  %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1
  %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2
  %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3
  %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
  %v20 = insertelement <4 x float> undef, float %f, i32 0
  %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1
  %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2
  %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3
  %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
  %v30 = insertelement <4 x float> undef, float %f, i32 0
  %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1
  %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2
  %v33 = insertelement <4 x float> %v32, float 0.000000e+00, i32 3
  %tmp3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %v33)
  %v40 = insertelement <2 x double> undef, double %d, i32 0
  %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1
  %tmp4 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %v41)
  %v50 = insertelement <2 x double> undef, double %d, i32 0
  %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1
  %tmp5 = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %v51)
  %v60 = insertelement <2 x double> undef, double %d, i32 0
  %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1
  %tmp6 = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %v61)
  %v70 = insertelement <2 x double> undef, double %d, i32 0
  %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1
  %tmp7 = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %v71)
  %tmp8 = add i32 %tmp0, %tmp2
  %tmp9 = add i32 %tmp4, %tmp6
  %tmp10 = add i32 %tmp8, %tmp9
  %tmp11 = sext i32 %tmp10 to i64
  %tmp12 = add i64 %tmp1, %tmp3
  %tmp13 = add i64 %tmp5, %tmp7
  %tmp14 = add i64 %tmp12, %tmp13
  %tmp15 = add i64 %tmp11, %tmp14
  ret i64 %tmp15
}

declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
declare i32 @llvm.x86.sse.cvtss2si(<4 x float>)
declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>)
declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>)
declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)