; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver1 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=-prefer-256-bit -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=+prefer-256-bit -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256

@srcA64 = common global [8 x double] zeroinitializer, align 64
@srcB64 = common global [8 x double] zeroinitializer, align 64
@srcC64 = common global [8 x double] zeroinitializer, align 64
@srcA32 = common global [16 x float] zeroinitializer, align 64
@srcB32 = common global [16 x float] zeroinitializer, align 64
@srcC32 = common global [16 x float] zeroinitializer, align 64
@dst64 = common global [8 x double] zeroinitializer, align 64
@dst32 = common global [16 x float] zeroinitializer, align 64

declare float @llvm.maxnum.f32(float, float)
declare double @llvm.maxnum.f64(double, double)

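; Each block of scalar @llvm.maxnum calls below loads from consecutive array
; elements and stores to consecutive elements, so the SLP vectorizer should
; rewrite it with the widest maxnum intrinsic the target supports.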
define void @fmaxnum_2f64() #0 {
; CHECK-LABEL: @fmaxnum_2f64(
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcA64 to <2 x double>*), align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcB64 to <2 x double>*), align 8
; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* bitcast ([8 x double]* @dst64 to <2 x double>*), align 8
; CHECK-NEXT:    ret void
;
  %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
  %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
  %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
  %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
  %fmaxnum0 = call double @llvm.maxnum.f64(double %a0, double %b0)
  %fmaxnum1 = call double @llvm.maxnum.f64(double %a1, double %b1)
  store double %fmaxnum0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
  store double %fmaxnum1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
  ret void
}

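; SSE splits the four doubles into two <2 x double> maxnum calls; AVX targets
; use a single <4 x double> call.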
define void @fmaxnum_4f64() #0 {
; SSE-LABEL: @fmaxnum_4f64(
; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcA64 to <2 x double>*), align 8
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2) to <2 x double>*), align 8
; SSE-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcB64 to <2 x double>*), align 8
; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2) to <2 x double>*), align 8
; SSE-NEXT:    [[TMP5:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP3]])
; SSE-NEXT:    [[TMP6:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP2]], <2 x double> [[TMP4]])
; SSE-NEXT:    store <2 x double> [[TMP5]], <2 x double>* bitcast ([8 x double]* @dst64 to <2 x double>*), align 8
; SSE-NEXT:    store <2 x double> [[TMP6]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2) to <2 x double>*), align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @fmaxnum_4f64(
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @srcA64 to <4 x double>*), align 8
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @srcB64 to <4 x double>*), align 8
; AVX-NEXT:    [[TMP3:%.*]] = call <4 x double> @llvm.maxnum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
; AVX-NEXT:    store <4 x double> [[TMP3]], <4 x double>* bitcast ([8 x double]* @dst64 to <4 x double>*), align 8
; AVX-NEXT:    ret void
;
  %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
  %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
  %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 8
  %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 8
  %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
  %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
  %b2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 8
  %b3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 8
  %fmaxnum0 = call double @llvm.maxnum.f64(double %a0, double %b0)
  %fmaxnum1 = call double @llvm.maxnum.f64(double %a1, double %b1)
  %fmaxnum2 = call double @llvm.maxnum.f64(double %a2, double %b2)
  %fmaxnum3 = call double @llvm.maxnum.f64(double %a3, double %b3)
  store double %fmaxnum0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
  store double %fmaxnum1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
  store double %fmaxnum2, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 8
  store double %fmaxnum3, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 8
  ret void
}

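; Eight doubles: SSE uses four <2 x double> calls, AVX256 two <4 x double>
; calls, and AVX512 a single <8 x double> call.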
define void @fmaxnum_8f64() #0 {
; SSE-LABEL: @fmaxnum_8f64(
; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcA64 to <2 x double>*), align 4
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 4) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 6) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @srcB64 to <2 x double>*), align 4
; SSE-NEXT:    [[TMP6:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP7:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 4) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP8:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 6) to <2 x double>*), align 4
; SSE-NEXT:    [[TMP9:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP5]])
; SSE-NEXT:    [[TMP10:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP2]], <2 x double> [[TMP6]])
; SSE-NEXT:    [[TMP11:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP3]], <2 x double> [[TMP7]])
; SSE-NEXT:    [[TMP12:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP8]])
; SSE-NEXT:    store <2 x double> [[TMP9]], <2 x double>* bitcast ([8 x double]* @dst64 to <2 x double>*), align 4
; SSE-NEXT:    store <2 x double> [[TMP10]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2) to <2 x double>*), align 4
; SSE-NEXT:    store <2 x double> [[TMP11]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4) to <2 x double>*), align 4
; SSE-NEXT:    store <2 x double> [[TMP12]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 6) to <2 x double>*), align 4
; SSE-NEXT:    ret void
;
; AVX256-LABEL: @fmaxnum_8f64(
; AVX256-NEXT:    [[TMP1:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @srcA64 to <4 x double>*), align 4
; AVX256-NEXT:    [[TMP2:%.*]] = load <4 x double>, <4 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 4) to <4 x double>*), align 4
; AVX256-NEXT:    [[TMP3:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @srcB64 to <4 x double>*), align 4
; AVX256-NEXT:    [[TMP4:%.*]] = load <4 x double>, <4 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 4) to <4 x double>*), align 4
; AVX256-NEXT:    [[TMP5:%.*]] = call <4 x double> @llvm.maxnum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP3]])
; AVX256-NEXT:    [[TMP6:%.*]] = call <4 x double> @llvm.maxnum.v4f64(<4 x double> [[TMP2]], <4 x double> [[TMP4]])
; AVX256-NEXT:    store <4 x double> [[TMP5]], <4 x double>* bitcast ([8 x double]* @dst64 to <4 x double>*), align 4
; AVX256-NEXT:    store <4 x double> [[TMP6]], <4 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4) to <4 x double>*), align 4
; AVX256-NEXT:    ret void
;
; AVX512-LABEL: @fmaxnum_8f64(
; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* bitcast ([8 x double]* @srcA64 to <8 x double>*), align 4
; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x double>, <8 x double>* bitcast ([8 x double]* @srcB64 to <8 x double>*), align 4
; AVX512-NEXT:    [[TMP3:%.*]] = call <8 x double> @llvm.maxnum.v8f64(<8 x double> [[TMP1]], <8 x double> [[TMP2]])
; AVX512-NEXT:    store <8 x double> [[TMP3]], <8 x double>* bitcast ([8 x double]* @dst64 to <8 x double>*), align 4
; AVX512-NEXT:    ret void
;
  %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 4
  %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 4
  %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 4
  %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 4
  %a4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 4), align 4
  %a5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 5), align 4
  %a6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 6), align 4
  %a7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 7), align 4
  %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 4
  %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 4
  %b2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 4
  %b3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 4
  %b4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 4), align 4
  %b5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 5), align 4
  %b6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 6), align 4
  %b7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 7), align 4
  %fmaxnum0 = call double @llvm.maxnum.f64(double %a0, double %b0)
  %fmaxnum1 = call double @llvm.maxnum.f64(double %a1, double %b1)
  %fmaxnum2 = call double @llvm.maxnum.f64(double %a2, double %b2)
  %fmaxnum3 = call double @llvm.maxnum.f64(double %a3, double %b3)
  %fmaxnum4 = call double @llvm.maxnum.f64(double %a4, double %b4)
  %fmaxnum5 = call double @llvm.maxnum.f64(double %a5, double %b5)
  %fmaxnum6 = call double @llvm.maxnum.f64(double %a6, double %b6)
  %fmaxnum7 = call double @llvm.maxnum.f64(double %a7, double %b7)
  store double %fmaxnum0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 4
  store double %fmaxnum1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 4
  store double %fmaxnum2, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 4
  store double %fmaxnum3, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 4
  store double %fmaxnum4, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4), align 4
  store double %fmaxnum5, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 5), align 4
  store double %fmaxnum6, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 6), align 4
  store double %fmaxnum7, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 7), align 4
  ret void
}

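; <4 x float> fits in a 128-bit XMM register, so every target vectorizes this
; case the same way.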
define void @fmaxnum_4f32() #0 {
; CHECK-LABEL: @fmaxnum_4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcA32 to <4 x float>*), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcB32 to <4 x float>*), align 4
; CHECK-NEXT:    [[TMP3:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP2]])
; CHECK-NEXT:    store <4 x float> [[TMP3]], <4 x float>* bitcast ([16 x float]* @dst32 to <4 x float>*), align 4
; CHECK-NEXT:    ret void
;
  %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
  %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
  %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
  %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
  %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
  %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
  %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
  %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
  %fmaxnum0 = call float @llvm.maxnum.f32(float %a0, float %b0)
  %fmaxnum1 = call float @llvm.maxnum.f32(float %a1, float %b1)
  %fmaxnum2 = call float @llvm.maxnum.f32(float %a2, float %b2)
  %fmaxnum3 = call float @llvm.maxnum.f32(float %a3, float %b3)
  store float %fmaxnum0, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
  store float %fmaxnum1, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
  store float %fmaxnum2, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
  store float %fmaxnum3, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
  ret void
}

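; SSE splits the eight floats into two <4 x float> calls; AVX targets use a
; single <8 x float> call.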
define void @fmaxnum_8f32() #0 {
; SSE-LABEL: @fmaxnum_8f32(
; SSE-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcA32 to <4 x float>*), align 4
; SSE-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcB32 to <4 x float>*), align 4
; SSE-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP5:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP3]])
; SSE-NEXT:    [[TMP6:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP2]], <4 x float> [[TMP4]])
; SSE-NEXT:    store <4 x float> [[TMP5]], <4 x float>* bitcast ([16 x float]* @dst32 to <4 x float>*), align 4
; SSE-NEXT:    store <4 x float> [[TMP6]], <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    ret void
;
; AVX-LABEL: @fmaxnum_8f32(
; AVX-NEXT:    [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @srcA32 to <8 x float>*), align 4
; AVX-NEXT:    [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @srcB32 to <8 x float>*), align 4
; AVX-NEXT:    [[TMP3:%.*]] = call <8 x float> @llvm.maxnum.v8f32(<8 x float> [[TMP1]], <8 x float> [[TMP2]])
; AVX-NEXT:    store <8 x float> [[TMP3]], <8 x float>* bitcast ([16 x float]* @dst32 to <8 x float>*), align 4
; AVX-NEXT:    ret void
;
  %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
  %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
  %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
  %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
  %a4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
  %a5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
  %a6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
  %a7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
  %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
  %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
  %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
  %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
  %b4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
  %b5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
  %b6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
  %b7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
  %fmaxnum0 = call float @llvm.maxnum.f32(float %a0, float %b0)
  %fmaxnum1 = call float @llvm.maxnum.f32(float %a1, float %b1)
  %fmaxnum2 = call float @llvm.maxnum.f32(float %a2, float %b2)
  %fmaxnum3 = call float @llvm.maxnum.f32(float %a3, float %b3)
  %fmaxnum4 = call float @llvm.maxnum.f32(float %a4, float %b4)
  %fmaxnum5 = call float @llvm.maxnum.f32(float %a5, float %b5)
  %fmaxnum6 = call float @llvm.maxnum.f32(float %a6, float %b6)
  %fmaxnum7 = call float @llvm.maxnum.f32(float %a7, float %b7)
  store float %fmaxnum0, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
  store float %fmaxnum1, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
  store float %fmaxnum2, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
  store float %fmaxnum3, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
  store float %fmaxnum4, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
  store float %fmaxnum5, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
  store float %fmaxnum6, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
  store float %fmaxnum7, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
  ret void
}

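; Sixteen floats: SSE uses four <4 x float> calls, AVX256 two <8 x float>
; calls, and AVX512 a single <16 x float> call.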
define void @fmaxnum_16f32() #0 {
; SSE-LABEL: @fmaxnum_16f32(
; SSE-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcA32 to <4 x float>*), align 4
; SSE-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 8) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 12) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP5:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @srcB32 to <4 x float>*), align 4
; SSE-NEXT:    [[TMP6:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP7:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 8) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP8:%.*]] = load <4 x float>, <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 12) to <4 x float>*), align 4
; SSE-NEXT:    [[TMP9:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP5]])
; SSE-NEXT:    [[TMP10:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP2]], <4 x float> [[TMP6]])
; SSE-NEXT:    [[TMP11:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP3]], <4 x float> [[TMP7]])
; SSE-NEXT:    [[TMP12:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP8]])
; SSE-NEXT:    store <4 x float> [[TMP9]], <4 x float>* bitcast ([16 x float]* @dst32 to <4 x float>*), align 4
; SSE-NEXT:    store <4 x float> [[TMP10]], <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4) to <4 x float>*), align 4
; SSE-NEXT:    store <4 x float> [[TMP11]], <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 8) to <4 x float>*), align 4
; SSE-NEXT:    store <4 x float> [[TMP12]], <4 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 12) to <4 x float>*), align 4
; SSE-NEXT:    ret void
;
; AVX256-LABEL: @fmaxnum_16f32(
; AVX256-NEXT:    [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @srcA32 to <8 x float>*), align 4
; AVX256-NEXT:    [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 8) to <8 x float>*), align 4
; AVX256-NEXT:    [[TMP3:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @srcB32 to <8 x float>*), align 4
; AVX256-NEXT:    [[TMP4:%.*]] = load <8 x float>, <8 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 8) to <8 x float>*), align 4
; AVX256-NEXT:    [[TMP5:%.*]] = call <8 x float> @llvm.maxnum.v8f32(<8 x float> [[TMP1]], <8 x float> [[TMP3]])
; AVX256-NEXT:    [[TMP6:%.*]] = call <8 x float> @llvm.maxnum.v8f32(<8 x float> [[TMP2]], <8 x float> [[TMP4]])
; AVX256-NEXT:    store <8 x float> [[TMP5]], <8 x float>* bitcast ([16 x float]* @dst32 to <8 x float>*), align 4
; AVX256-NEXT:    store <8 x float> [[TMP6]], <8 x float>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 8) to <8 x float>*), align 4
; AVX256-NEXT:    ret void
;
; AVX512-LABEL: @fmaxnum_16f32(
; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x float>, <16 x float>* bitcast ([16 x float]* @srcA32 to <16 x float>*), align 4
; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x float>, <16 x float>* bitcast ([16 x float]* @srcB32 to <16 x float>*), align 4
; AVX512-NEXT:    [[TMP3:%.*]] = call <16 x float> @llvm.maxnum.v16f32(<16 x float> [[TMP1]], <16 x float> [[TMP2]])
; AVX512-NEXT:    store <16 x float> [[TMP3]], <16 x float>* bitcast ([16 x float]* @dst32 to <16 x float>*), align 4
; AVX512-NEXT:    ret void
;
  %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
  %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
  %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
  %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
  %a4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
  %a5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
  %a6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
  %a7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
  %a8 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 8), align 4
  %a9 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 9), align 4
  %a10 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 10), align 4
  %a11 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 11), align 4
  %a12 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 12), align 4
  %a13 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 13), align 4
  %a14 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 14), align 4
  %a15 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 15), align 4
  %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
  %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
  %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
  %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
  %b4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
  %b5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
  %b6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
  %b7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
  %b8 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 8), align 4
  %b9 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 9), align 4
  %b10 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 10), align 4
  %b11 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 11), align 4
  %b12 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 12), align 4
  %b13 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 13), align 4
  %b14 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 14), align 4
  %b15 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 15), align 4
  %fmaxnum0 = call float @llvm.maxnum.f32(float %a0 , float %b0 )
  %fmaxnum1 = call float @llvm.maxnum.f32(float %a1 , float %b1 )
  %fmaxnum2 = call float @llvm.maxnum.f32(float %a2 , float %b2 )
  %fmaxnum3 = call float @llvm.maxnum.f32(float %a3 , float %b3 )
  %fmaxnum4 = call float @llvm.maxnum.f32(float %a4 , float %b4 )
  %fmaxnum5 = call float @llvm.maxnum.f32(float %a5 , float %b5 )
  %fmaxnum6 = call float @llvm.maxnum.f32(float %a6 , float %b6 )
  %fmaxnum7 = call float @llvm.maxnum.f32(float %a7 , float %b7 )
  %fmaxnum8 = call float @llvm.maxnum.f32(float %a8 , float %b8 )
  %fmaxnum9 = call float @llvm.maxnum.f32(float %a9 , float %b9 )
  %fmaxnum10 = call float @llvm.maxnum.f32(float %a10, float %b10)
  %fmaxnum11 = call float @llvm.maxnum.f32(float %a11, float %b11)
  %fmaxnum12 = call float @llvm.maxnum.f32(float %a12, float %b12)
  %fmaxnum13 = call float @llvm.maxnum.f32(float %a13, float %b13)
  %fmaxnum14 = call float @llvm.maxnum.f32(float %a14, float %b14)
  %fmaxnum15 = call float @llvm.maxnum.f32(float %a15, float %b15)
  store float %fmaxnum0 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
  store float %fmaxnum1 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
  store float %fmaxnum2 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
  store float %fmaxnum3 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
  store float %fmaxnum4 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
  store float %fmaxnum5 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
  store float %fmaxnum6 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
  store float %fmaxnum7 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
  store float %fmaxnum8 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 8), align 4
  store float %fmaxnum9 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 9), align 4
  store float %fmaxnum10, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 10), align 4
  store float %fmaxnum11, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 11), align 4
  store float %fmaxnum12, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 12), align 4
  store float %fmaxnum13, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 13), align 4
  store float %fmaxnum14, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 14), align 4
  store float %fmaxnum15, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 15), align 4
  ret void
}

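; The remaining tests check horizontal reductions: a chain of maxnum calls
; over consecutive loads should collapse into one @llvm.vector.reduce.fmax
; call, but only when the calls carry the required fast-math flags (nnan).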
define float @reduction_v4f32_fast(float* %p) {
; CHECK-LABEL: @reduction_v4f32_fast(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P]] to <4 x float>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP2]])
; CHECK-NEXT:    ret float [[TMP3]]
;
  %g1 = getelementptr inbounds float, float* %p, i64 1
  %g2 = getelementptr inbounds float, float* %p, i64 2
  %g3 = getelementptr inbounds float, float* %p, i64 3
  %t0 = load float, float* %p, align 4
  %t1 = load float, float* %g1, align 4
  %t2 = load float, float* %g2, align 4
  %t3 = load float, float* %g3, align 4
  %m1 = tail call fast float @llvm.maxnum.f32(float %t1, float %t0)
  %m2 = tail call fast float @llvm.maxnum.f32(float %t2, float %m1)
  %m3 = tail call fast float @llvm.maxnum.f32(float %t3, float %m2)
  ret float %m3
}

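; The nnan flag alone is sufficient for the fmax reduction to be formed; the
; full set of fast flags is not required.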
define float @reduction_v4f32_nnan(float* %p) {
; CHECK-LABEL: @reduction_v4f32_nnan(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P]] to <4 x float>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP2]])
; CHECK-NEXT:    ret float [[TMP3]]
;
  %g1 = getelementptr inbounds float, float* %p, i64 1
  %g2 = getelementptr inbounds float, float* %p, i64 2
  %g3 = getelementptr inbounds float, float* %p, i64 3
  %t0 = load float, float* %p, align 4
  %t1 = load float, float* %g1, align 4
  %t2 = load float, float* %g2, align 4
  %t3 = load float, float* %g3, align 4
  %m1 = tail call nnan float @llvm.maxnum.f32(float %t1, float %t0)
  %m2 = tail call nnan float @llvm.maxnum.f32(float %t2, float %m1)
  %m3 = tail call nnan float @llvm.maxnum.f32(float %t3, float %m2)
  ret float %m3
}

; Negative test - must have nnan.

define float @reduction_v4f32_not_fast(float* %p) {
; CHECK-LABEL: @reduction_v4f32_not_fast(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
; CHECK-NEXT:    [[T0:%.*]] = load float, float* [[P]], align 4
; CHECK-NEXT:    [[T1:%.*]] = load float, float* [[G1]], align 4
; CHECK-NEXT:    [[T2:%.*]] = load float, float* [[G2]], align 4
; CHECK-NEXT:    [[T3:%.*]] = load float, float* [[G3]], align 4
; CHECK-NEXT:    [[M1:%.*]] = tail call float @llvm.maxnum.f32(float [[T1]], float [[T0]])
; CHECK-NEXT:    [[M2:%.*]] = tail call float @llvm.maxnum.f32(float [[T2]], float [[M1]])
; CHECK-NEXT:    [[M3:%.*]] = tail call float @llvm.maxnum.f32(float [[T3]], float [[M2]])
; CHECK-NEXT:    ret float [[M3]]
;
  %g1 = getelementptr inbounds float, float* %p, i64 1
  %g2 = getelementptr inbounds float, float* %p, i64 2
  %g3 = getelementptr inbounds float, float* %p, i64 3
  %t0 = load float, float* %p, align 4
  %t1 = load float, float* %g1, align 4
  %t2 = load float, float* %g2, align 4
  %t3 = load float, float* %g3, align 4
  %m1 = tail call float @llvm.maxnum.f32(float %t1, float %t0)
  %m2 = tail call float @llvm.maxnum.f32(float %t2, float %m1)
  %m3 = tail call float @llvm.maxnum.f32(float %t3, float %m2)
  ret float %m3
}

define float @reduction_v8f32_fast(float* %p) {
; CHECK-LABEL: @reduction_v8f32_fast(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
; CHECK-NEXT:    [[G4:%.*]] = getelementptr inbounds float, float* [[P]], i64 4
; CHECK-NEXT:    [[G5:%.*]] = getelementptr inbounds float, float* [[P]], i64 5
; CHECK-NEXT:    [[G6:%.*]] = getelementptr inbounds float, float* [[P]], i64 6
; CHECK-NEXT:    [[G7:%.*]] = getelementptr inbounds float, float* [[P]], i64 7
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P]] to <8 x float>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x float>, <8 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fmax.v8f32(<8 x float> [[TMP2]])
; CHECK-NEXT:    ret float [[TMP3]]
;
  %g1 = getelementptr inbounds float, float* %p, i64 1
  %g2 = getelementptr inbounds float, float* %p, i64 2
  %g3 = getelementptr inbounds float, float* %p, i64 3
  %g4 = getelementptr inbounds float, float* %p, i64 4
  %g5 = getelementptr inbounds float, float* %p, i64 5
  %g6 = getelementptr inbounds float, float* %p, i64 6
  %g7 = getelementptr inbounds float, float* %p, i64 7
  %t0 = load float, float* %p, align 4
  %t1 = load float, float* %g1, align 4
  %t2 = load float, float* %g2, align 4
  %t3 = load float, float* %g3, align 4
  %t4 = load float, float* %g4, align 4
  %t5 = load float, float* %g5, align 4
  %t6 = load float, float* %g6, align 4
  %t7 = load float, float* %g7, align 4
  %m1 = tail call fast float @llvm.maxnum.f32(float %t1, float %t0)
  %m2 = tail call fast float @llvm.maxnum.f32(float %t2, float %m1)
  %m3 = tail call fast float @llvm.maxnum.f32(float %t3, float %m2)
  %m4 = tail call fast float @llvm.maxnum.f32(float %t4, float %m3)
  %m5 = tail call fast float @llvm.maxnum.f32(float %m4, float %t6)
  %m6 = tail call fast float @llvm.maxnum.f32(float %m5, float %t5)
  %m7 = tail call fast float @llvm.maxnum.f32(float %m6, float %t7)
  ret float %m7
}

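; A two-element maxnum chain is left scalar; the checks expect the original
; loads and the single scalar maxnum call to remain.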
define double @reduction_v2f64_fast(double* %p) {
; CHECK-LABEL: @reduction_v2f64_fast(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
; CHECK-NEXT:    [[T0:%.*]] = load double, double* [[P]], align 4
; CHECK-NEXT:    [[T1:%.*]] = load double, double* [[G1]], align 4
; CHECK-NEXT:    [[M1:%.*]] = tail call fast double @llvm.maxnum.f64(double [[T1]], double [[T0]])
; CHECK-NEXT:    ret double [[M1]]
;
  %g1 = getelementptr inbounds double, double* %p, i64 1
  %t0 = load double, double* %p, align 4
  %t1 = load double, double* %g1, align 4
  %m1 = tail call fast double @llvm.maxnum.f64(double %t1, double %t0)
  ret double %m1
}

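; Four doubles with fast flags reduce to a single
; @llvm.vector.reduce.fmax.v4f64 call.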
define double @reduction_v4f64_fast(double* %p) {
; CHECK-LABEL: @reduction_v4f64_fast(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds double, double* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds double, double* [[P]], i64 3
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[P]] to <4 x double>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x double>, <4 x double>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fmax.v4f64(<4 x double> [[TMP2]])
; CHECK-NEXT:    ret double [[TMP3]]
;
  %g1 = getelementptr inbounds double, double* %p, i64 1
  %g2 = getelementptr inbounds double, double* %p, i64 2
  %g3 = getelementptr inbounds double, double* %p, i64 3
  %t0 = load double, double* %p, align 4
  %t1 = load double, double* %g1, align 4
  %t2 = load double, double* %g2, align 4
  %t3 = load double, double* %g3, align 4
  %m1 = tail call fast double @llvm.maxnum.f64(double %t1, double %t0)
  %m2 = tail call fast double @llvm.maxnum.f64(double %t2, double %m1)
  %m3 = tail call fast double @llvm.maxnum.f64(double %t3, double %m2)
  ret double %m3
}

; Negative test - must have nnan.

define double @reduction_v4f64_wrong_fmf(double* %p) {
; CHECK-LABEL: @reduction_v4f64_wrong_fmf(
; CHECK-NEXT:    [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr inbounds double, double* [[P]], i64 2
; CHECK-NEXT:    [[G3:%.*]] = getelementptr inbounds double, double* [[P]], i64 3
; CHECK-NEXT:    [[T0:%.*]] = load double, double* [[P]], align 4
; CHECK-NEXT:    [[T1:%.*]] = load double, double* [[G1]], align 4
; CHECK-NEXT:    [[T2:%.*]] = load double, double* [[G2]], align 4
; CHECK-NEXT:    [[T3:%.*]] = load double, double* [[G3]], align 4
; CHECK-NEXT:    [[M1:%.*]] = tail call ninf nsz double @llvm.maxnum.f64(double [[T1]], double [[T0]])
; CHECK-NEXT:    [[M2:%.*]] = tail call ninf nsz double @llvm.maxnum.f64(double [[T2]], double [[M1]])
; CHECK-NEXT:    [[M3:%.*]] = tail call ninf nsz double @llvm.maxnum.f64(double [[T3]], double [[M2]])
; CHECK-NEXT:    ret double [[M3]]
;
  %g1 = getelementptr inbounds double, double* %p, i64 1
  %g2 = getelementptr inbounds double, double* %p, i64 2
  %g3 = getelementptr inbounds double, double* %p, i64 3
  %t0 = load double, double* %p, align 4
  %t1 = load double, double* %g1, align 4
  %t2 = load double, double* %g2, align 4
  %t3 = load double, double* %g3, align 4
  %m1 = tail call ninf nsz double @llvm.maxnum.f64(double %t1, double %t0)
  %m2 = tail call ninf nsz double @llvm.maxnum.f64(double %t2, double %m1)
  %m3 = tail call ninf nsz double @llvm.maxnum.f64(double %t3, double %m2)
  ret double %m3
}

attributes #0 = { nounwind }