1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE2
3 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE41
4 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
5 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
6 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
7 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
9 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
11 @src64 = common global [8 x double] zeroinitializer, align 64
12 @dst64 = common global [8 x double] zeroinitializer, align 64
13 @src32 = common global [16 x float] zeroinitializer, align 64
14 @dst32 = common global [16 x float] zeroinitializer, align 64
16 declare double @llvm.ceil.f64(double %p)
17 declare double @llvm.floor.f64(double %p)
18 declare double @llvm.nearbyint.f64(double %p)
19 declare double @llvm.rint.f64(double %p)
20 declare double @llvm.trunc.f64(double %p)
22 declare float @llvm.ceil.f32(float %p)
23 declare float @llvm.floor.f32(float %p)
24 declare float @llvm.nearbyint.f32(float %p)
25 declare float @llvm.rint.f32(float %p)
26 declare float @llvm.trunc.f32(float %p)
28 define void @ceil_2f64() #0 {
29 ; SSE2-LABEL: @ceil_2f64(
30 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
31 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
32 ; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
33 ; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
34 ; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
35 ; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
38 ; SSE41-LABEL: @ceil_2f64(
39 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
40 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
41 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
42 ; SSE41-NEXT: ret void
44 ; AVX-LABEL: @ceil_2f64(
45 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
46 ; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
47 ; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
50 %ld0 = load double, ptr @src64, align 8
51 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
52 %ceil0 = call double @llvm.ceil.f64(double %ld0)
53 %ceil1 = call double @llvm.ceil.f64(double %ld1)
54 store double %ceil0, ptr @dst64, align 8
55 store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
59 define void @ceil_4f64() #0 {
60 ; SSE2-LABEL: @ceil_4f64(
61 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
62 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
63 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
64 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
65 ; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
66 ; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
67 ; SSE2-NEXT: [[CEIL2:%.*]] = call double @llvm.ceil.f64(double [[LD2]])
68 ; SSE2-NEXT: [[CEIL3:%.*]] = call double @llvm.ceil.f64(double [[LD3]])
69 ; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
70 ; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
71 ; SSE2-NEXT: store double [[CEIL2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
72 ; SSE2-NEXT: store double [[CEIL3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
75 ; SSE41-LABEL: @ceil_4f64(
76 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
77 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
78 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
79 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
80 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP3]])
81 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
82 ; SSE41-NEXT: ret void
84 ; AVX-LABEL: @ceil_4f64(
85 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
86 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
87 ; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
90 %ld0 = load double, ptr @src64, align 8
91 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
92 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
93 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
94 %ceil0 = call double @llvm.ceil.f64(double %ld0)
95 %ceil1 = call double @llvm.ceil.f64(double %ld1)
96 %ceil2 = call double @llvm.ceil.f64(double %ld2)
97 %ceil3 = call double @llvm.ceil.f64(double %ld3)
98 store double %ceil0, ptr @dst64, align 8
99 store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
100 store double %ceil2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
101 store double %ceil3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
105 define void @ceil_8f64() #0 {
106 ; SSE2-LABEL: @ceil_8f64(
107 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
108 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
109 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
110 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
111 ; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
112 ; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
113 ; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
114 ; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
115 ; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
116 ; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
117 ; SSE2-NEXT: [[CEIL2:%.*]] = call double @llvm.ceil.f64(double [[LD2]])
118 ; SSE2-NEXT: [[CEIL3:%.*]] = call double @llvm.ceil.f64(double [[LD3]])
119 ; SSE2-NEXT: [[CEIL4:%.*]] = call double @llvm.ceil.f64(double [[LD4]])
120 ; SSE2-NEXT: [[CEIL5:%.*]] = call double @llvm.ceil.f64(double [[LD5]])
121 ; SSE2-NEXT: [[CEIL6:%.*]] = call double @llvm.ceil.f64(double [[LD6]])
122 ; SSE2-NEXT: [[CEIL7:%.*]] = call double @llvm.ceil.f64(double [[LD7]])
123 ; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
124 ; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
125 ; SSE2-NEXT: store double [[CEIL2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
126 ; SSE2-NEXT: store double [[CEIL3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
127 ; SSE2-NEXT: store double [[CEIL4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
128 ; SSE2-NEXT: store double [[CEIL5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
129 ; SSE2-NEXT: store double [[CEIL6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
130 ; SSE2-NEXT: store double [[CEIL7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
131 ; SSE2-NEXT: ret void
133 ; SSE41-LABEL: @ceil_8f64(
134 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
135 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
136 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
137 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
138 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP3]])
139 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
140 ; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
141 ; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP5]])
142 ; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
143 ; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
144 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP7]])
145 ; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
146 ; SSE41-NEXT: ret void
148 ; AVX1-LABEL: @ceil_8f64(
149 ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
150 ; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
151 ; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
152 ; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
153 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP3]])
154 ; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
155 ; AVX1-NEXT: ret void
157 ; AVX2-LABEL: @ceil_8f64(
158 ; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
159 ; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
160 ; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
161 ; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
162 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP3]])
163 ; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
164 ; AVX2-NEXT: ret void
166 ; AVX512-LABEL: @ceil_8f64(
167 ; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
168 ; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.ceil.v8f64(<8 x double> [[TMP1]])
169 ; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
170 ; AVX512-NEXT: ret void
172 %ld0 = load double, ptr @src64, align 8
173 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
174 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
175 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
176 %ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
177 %ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
178 %ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
179 %ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
180 %ceil0 = call double @llvm.ceil.f64(double %ld0)
181 %ceil1 = call double @llvm.ceil.f64(double %ld1)
182 %ceil2 = call double @llvm.ceil.f64(double %ld2)
183 %ceil3 = call double @llvm.ceil.f64(double %ld3)
184 %ceil4 = call double @llvm.ceil.f64(double %ld4)
185 %ceil5 = call double @llvm.ceil.f64(double %ld5)
186 %ceil6 = call double @llvm.ceil.f64(double %ld6)
187 %ceil7 = call double @llvm.ceil.f64(double %ld7)
188 store double %ceil0, ptr @dst64, align 8
189 store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
190 store double %ceil2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
191 store double %ceil3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
192 store double %ceil4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
193 store double %ceil5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
194 store double %ceil6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
195 store double %ceil7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
199 define void @floor_2f64() #0 {
200 ; SSE2-LABEL: @floor_2f64(
201 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
202 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
203 ; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
204 ; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
205 ; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
206 ; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
207 ; SSE2-NEXT: ret void
209 ; SSE41-LABEL: @floor_2f64(
210 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
211 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
212 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
213 ; SSE41-NEXT: ret void
215 ; AVX-LABEL: @floor_2f64(
216 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
217 ; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
218 ; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
221 %ld0 = load double, ptr @src64, align 8
222 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
223 %floor0 = call double @llvm.floor.f64(double %ld0)
224 %floor1 = call double @llvm.floor.f64(double %ld1)
225 store double %floor0, ptr @dst64, align 8
226 store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
230 define void @floor_4f64() #0 {
231 ; SSE2-LABEL: @floor_4f64(
232 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
233 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
234 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
235 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
236 ; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
237 ; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
238 ; SSE2-NEXT: [[FLOOR2:%.*]] = call double @llvm.floor.f64(double [[LD2]])
239 ; SSE2-NEXT: [[FLOOR3:%.*]] = call double @llvm.floor.f64(double [[LD3]])
240 ; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
241 ; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
242 ; SSE2-NEXT: store double [[FLOOR2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
243 ; SSE2-NEXT: store double [[FLOOR3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
244 ; SSE2-NEXT: ret void
246 ; SSE41-LABEL: @floor_4f64(
247 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
248 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
249 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
250 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
251 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP3]])
252 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
253 ; SSE41-NEXT: ret void
255 ; AVX-LABEL: @floor_4f64(
256 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
257 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
258 ; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
261 %ld0 = load double, ptr @src64, align 8
262 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
263 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
264 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
265 %floor0 = call double @llvm.floor.f64(double %ld0)
266 %floor1 = call double @llvm.floor.f64(double %ld1)
267 %floor2 = call double @llvm.floor.f64(double %ld2)
268 %floor3 = call double @llvm.floor.f64(double %ld3)
269 store double %floor0, ptr @dst64, align 8
270 store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
271 store double %floor2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
272 store double %floor3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
276 define void @floor_8f64() #0 {
277 ; SSE2-LABEL: @floor_8f64(
278 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
279 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
280 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
281 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
282 ; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
283 ; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
284 ; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
285 ; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
286 ; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
287 ; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
288 ; SSE2-NEXT: [[FLOOR2:%.*]] = call double @llvm.floor.f64(double [[LD2]])
289 ; SSE2-NEXT: [[FLOOR3:%.*]] = call double @llvm.floor.f64(double [[LD3]])
290 ; SSE2-NEXT: [[FLOOR4:%.*]] = call double @llvm.floor.f64(double [[LD4]])
291 ; SSE2-NEXT: [[FLOOR5:%.*]] = call double @llvm.floor.f64(double [[LD5]])
292 ; SSE2-NEXT: [[FLOOR6:%.*]] = call double @llvm.floor.f64(double [[LD6]])
293 ; SSE2-NEXT: [[FLOOR7:%.*]] = call double @llvm.floor.f64(double [[LD7]])
294 ; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
295 ; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
296 ; SSE2-NEXT: store double [[FLOOR2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
297 ; SSE2-NEXT: store double [[FLOOR3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
298 ; SSE2-NEXT: store double [[FLOOR4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
299 ; SSE2-NEXT: store double [[FLOOR5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
300 ; SSE2-NEXT: store double [[FLOOR6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
301 ; SSE2-NEXT: store double [[FLOOR7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
302 ; SSE2-NEXT: ret void
304 ; SSE41-LABEL: @floor_8f64(
305 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
306 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
307 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
308 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
309 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP3]])
310 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
311 ; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
312 ; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP5]])
313 ; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
314 ; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
315 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP7]])
316 ; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
317 ; SSE41-NEXT: ret void
319 ; AVX1-LABEL: @floor_8f64(
320 ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
321 ; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
322 ; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
323 ; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
324 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP3]])
325 ; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
326 ; AVX1-NEXT: ret void
328 ; AVX2-LABEL: @floor_8f64(
329 ; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
330 ; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
331 ; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
332 ; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
333 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP3]])
334 ; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
335 ; AVX2-NEXT: ret void
337 ; AVX512-LABEL: @floor_8f64(
338 ; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
339 ; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.floor.v8f64(<8 x double> [[TMP1]])
340 ; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
341 ; AVX512-NEXT: ret void
343 %ld0 = load double, ptr @src64, align 8
344 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
345 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
346 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
347 %ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
348 %ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
349 %ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
350 %ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
351 %floor0 = call double @llvm.floor.f64(double %ld0)
352 %floor1 = call double @llvm.floor.f64(double %ld1)
353 %floor2 = call double @llvm.floor.f64(double %ld2)
354 %floor3 = call double @llvm.floor.f64(double %ld3)
355 %floor4 = call double @llvm.floor.f64(double %ld4)
356 %floor5 = call double @llvm.floor.f64(double %ld5)
357 %floor6 = call double @llvm.floor.f64(double %ld6)
358 %floor7 = call double @llvm.floor.f64(double %ld7)
359 store double %floor0, ptr @dst64, align 8
360 store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
361 store double %floor2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
362 store double %floor3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
363 store double %floor4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
364 store double %floor5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
365 store double %floor6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
366 store double %floor7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
370 define void @nearbyint_2f64() #0 {
371 ; SSE2-LABEL: @nearbyint_2f64(
372 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
373 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
374 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
375 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
376 ; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
377 ; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
378 ; SSE2-NEXT: ret void
380 ; SSE41-LABEL: @nearbyint_2f64(
381 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
382 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
383 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
384 ; SSE41-NEXT: ret void
386 ; AVX-LABEL: @nearbyint_2f64(
387 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
388 ; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
389 ; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
392 %ld0 = load double, ptr @src64, align 8
393 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
394 %nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
395 %nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
396 store double %nearbyint0, ptr @dst64, align 8
397 store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
401 define void @nearbyint_4f64() #0 {
402 ; SSE2-LABEL: @nearbyint_4f64(
403 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
404 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
405 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
406 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
407 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
408 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
409 ; SSE2-NEXT: [[NEARBYINT2:%.*]] = call double @llvm.nearbyint.f64(double [[LD2]])
410 ; SSE2-NEXT: [[NEARBYINT3:%.*]] = call double @llvm.nearbyint.f64(double [[LD3]])
411 ; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
412 ; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
413 ; SSE2-NEXT: store double [[NEARBYINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
414 ; SSE2-NEXT: store double [[NEARBYINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
415 ; SSE2-NEXT: ret void
417 ; SSE41-LABEL: @nearbyint_4f64(
418 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
419 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
420 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
421 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
422 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP3]])
423 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
424 ; SSE41-NEXT: ret void
426 ; AVX-LABEL: @nearbyint_4f64(
427 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
428 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
429 ; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
432 %ld0 = load double, ptr @src64, align 8
433 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
434 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
435 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
436 %nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
437 %nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
438 %nearbyint2 = call double @llvm.nearbyint.f64(double %ld2)
439 %nearbyint3 = call double @llvm.nearbyint.f64(double %ld3)
440 store double %nearbyint0, ptr @dst64, align 8
441 store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
442 store double %nearbyint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
443 store double %nearbyint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
447 define void @nearbyint_8f64() #0 {
448 ; SSE2-LABEL: @nearbyint_8f64(
449 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
450 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
451 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
452 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
453 ; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
454 ; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
455 ; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
456 ; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
457 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
458 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
459 ; SSE2-NEXT: [[NEARBYINT2:%.*]] = call double @llvm.nearbyint.f64(double [[LD2]])
460 ; SSE2-NEXT: [[NEARBYINT3:%.*]] = call double @llvm.nearbyint.f64(double [[LD3]])
461 ; SSE2-NEXT: [[NEARBYINT4:%.*]] = call double @llvm.nearbyint.f64(double [[LD4]])
462 ; SSE2-NEXT: [[NEARBYINT5:%.*]] = call double @llvm.nearbyint.f64(double [[LD5]])
463 ; SSE2-NEXT: [[NEARBYINT6:%.*]] = call double @llvm.nearbyint.f64(double [[LD6]])
464 ; SSE2-NEXT: [[NEARBYINT7:%.*]] = call double @llvm.nearbyint.f64(double [[LD7]])
465 ; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
466 ; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
467 ; SSE2-NEXT: store double [[NEARBYINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
468 ; SSE2-NEXT: store double [[NEARBYINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
469 ; SSE2-NEXT: store double [[NEARBYINT4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
470 ; SSE2-NEXT: store double [[NEARBYINT5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
471 ; SSE2-NEXT: store double [[NEARBYINT6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
472 ; SSE2-NEXT: store double [[NEARBYINT7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
473 ; SSE2-NEXT: ret void
475 ; SSE41-LABEL: @nearbyint_8f64(
476 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
477 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
478 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
479 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
480 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP3]])
481 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
482 ; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
483 ; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP5]])
484 ; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
485 ; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
486 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP7]])
487 ; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
488 ; SSE41-NEXT: ret void
490 ; AVX1-LABEL: @nearbyint_8f64(
491 ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
492 ; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
493 ; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
494 ; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
495 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP3]])
496 ; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
497 ; AVX1-NEXT: ret void
499 ; AVX2-LABEL: @nearbyint_8f64(
500 ; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
501 ; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
502 ; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
503 ; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
504 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP3]])
505 ; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
506 ; AVX2-NEXT: ret void
508 ; AVX512-LABEL: @nearbyint_8f64(
509 ; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
510 ; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> [[TMP1]])
511 ; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
512 ; AVX512-NEXT: ret void
514 %ld0 = load double, ptr @src64, align 8
515 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
516 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
517 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
518 %ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
519 %ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
520 %ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
521 %ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
522 %nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
523 %nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
524 %nearbyint2 = call double @llvm.nearbyint.f64(double %ld2)
525 %nearbyint3 = call double @llvm.nearbyint.f64(double %ld3)
526 %nearbyint4 = call double @llvm.nearbyint.f64(double %ld4)
527 %nearbyint5 = call double @llvm.nearbyint.f64(double %ld5)
528 %nearbyint6 = call double @llvm.nearbyint.f64(double %ld6)
529 %nearbyint7 = call double @llvm.nearbyint.f64(double %ld7)
530 store double %nearbyint0, ptr @dst64, align 8
531 store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
532 store double %nearbyint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
533 store double %nearbyint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
534 store double %nearbyint4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
535 store double %nearbyint5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
536 store double %nearbyint6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
537 store double %nearbyint7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
541 define void @rint_2f64() #0 {
542 ; SSE2-LABEL: @rint_2f64(
543 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
544 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
545 ; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
546 ; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
547 ; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
548 ; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
549 ; SSE2-NEXT: ret void
551 ; SSE41-LABEL: @rint_2f64(
552 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
553 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
554 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
555 ; SSE41-NEXT: ret void
557 ; AVX-LABEL: @rint_2f64(
558 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
559 ; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
560 ; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
563 %ld0 = load double, ptr @src64, align 8
564 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
565 %rint0 = call double @llvm.rint.f64(double %ld0)
566 %rint1 = call double @llvm.rint.f64(double %ld1)
567 store double %rint0, ptr @dst64, align 8
568 store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
572 define void @rint_4f64() #0 {
573 ; SSE2-LABEL: @rint_4f64(
574 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
575 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
576 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
577 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
578 ; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
579 ; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
580 ; SSE2-NEXT: [[RINT2:%.*]] = call double @llvm.rint.f64(double [[LD2]])
581 ; SSE2-NEXT: [[RINT3:%.*]] = call double @llvm.rint.f64(double [[LD3]])
582 ; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
583 ; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
584 ; SSE2-NEXT: store double [[RINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
585 ; SSE2-NEXT: store double [[RINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
586 ; SSE2-NEXT: ret void
588 ; SSE41-LABEL: @rint_4f64(
589 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
590 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
591 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
592 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
593 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP3]])
594 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
595 ; SSE41-NEXT: ret void
597 ; AVX-LABEL: @rint_4f64(
598 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
599 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
600 ; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
603 %ld0 = load double, ptr @src64, align 8
604 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
605 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
606 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
607 %rint0 = call double @llvm.rint.f64(double %ld0)
608 %rint1 = call double @llvm.rint.f64(double %ld1)
609 %rint2 = call double @llvm.rint.f64(double %ld2)
610 %rint3 = call double @llvm.rint.f64(double %ld3)
611 store double %rint0, ptr @dst64, align 8
612 store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
613 store double %rint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
614 store double %rint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
618 define void @rint_8f64() #0 {
619 ; SSE2-LABEL: @rint_8f64(
620 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
621 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
622 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
623 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
624 ; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
625 ; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
626 ; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
627 ; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
628 ; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
629 ; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
630 ; SSE2-NEXT: [[RINT2:%.*]] = call double @llvm.rint.f64(double [[LD2]])
631 ; SSE2-NEXT: [[RINT3:%.*]] = call double @llvm.rint.f64(double [[LD3]])
632 ; SSE2-NEXT: [[RINT4:%.*]] = call double @llvm.rint.f64(double [[LD4]])
633 ; SSE2-NEXT: [[RINT5:%.*]] = call double @llvm.rint.f64(double [[LD5]])
634 ; SSE2-NEXT: [[RINT6:%.*]] = call double @llvm.rint.f64(double [[LD6]])
635 ; SSE2-NEXT: [[RINT7:%.*]] = call double @llvm.rint.f64(double [[LD7]])
636 ; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
637 ; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
638 ; SSE2-NEXT: store double [[RINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
639 ; SSE2-NEXT: store double [[RINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
640 ; SSE2-NEXT: store double [[RINT4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
641 ; SSE2-NEXT: store double [[RINT5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
642 ; SSE2-NEXT: store double [[RINT6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
643 ; SSE2-NEXT: store double [[RINT7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
644 ; SSE2-NEXT: ret void
646 ; SSE41-LABEL: @rint_8f64(
647 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
648 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
649 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
650 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
651 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP3]])
652 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
653 ; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
654 ; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP5]])
655 ; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
656 ; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
657 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP7]])
658 ; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
659 ; SSE41-NEXT: ret void
661 ; AVX1-LABEL: @rint_8f64(
662 ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
663 ; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
664 ; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
665 ; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
666 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP3]])
667 ; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
668 ; AVX1-NEXT: ret void
670 ; AVX2-LABEL: @rint_8f64(
671 ; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
672 ; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
673 ; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
674 ; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
675 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP3]])
676 ; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
677 ; AVX2-NEXT: ret void
679 ; AVX512-LABEL: @rint_8f64(
680 ; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
681 ; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.rint.v8f64(<8 x double> [[TMP1]])
682 ; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
683 ; AVX512-NEXT: ret void
685 %ld0 = load double, ptr @src64, align 8
686 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
687 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
688 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
689 %ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
690 %ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
691 %ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
692 %ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
693 %rint0 = call double @llvm.rint.f64(double %ld0)
694 %rint1 = call double @llvm.rint.f64(double %ld1)
695 %rint2 = call double @llvm.rint.f64(double %ld2)
696 %rint3 = call double @llvm.rint.f64(double %ld3)
697 %rint4 = call double @llvm.rint.f64(double %ld4)
698 %rint5 = call double @llvm.rint.f64(double %ld5)
699 %rint6 = call double @llvm.rint.f64(double %ld6)
700 %rint7 = call double @llvm.rint.f64(double %ld7)
701 store double %rint0, ptr @dst64, align 8
702 store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
703 store double %rint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
704 store double %rint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
705 store double %rint4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
706 store double %rint5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
707 store double %rint6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
708 store double %rint7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
712 define void @trunc_2f64() #0 {
713 ; SSE2-LABEL: @trunc_2f64(
714 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
715 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
716 ; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
717 ; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
718 ; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
719 ; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
720 ; SSE2-NEXT: ret void
722 ; SSE41-LABEL: @trunc_2f64(
723 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
724 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
725 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
726 ; SSE41-NEXT: ret void
728 ; AVX-LABEL: @trunc_2f64(
729 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
730 ; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
731 ; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
734 %ld0 = load double, ptr @src64, align 8
735 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
736 %trunc0 = call double @llvm.trunc.f64(double %ld0)
737 %trunc1 = call double @llvm.trunc.f64(double %ld1)
738 store double %trunc0, ptr @dst64, align 8
739 store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
743 define void @trunc_4f64() #0 {
744 ; SSE2-LABEL: @trunc_4f64(
745 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
746 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
747 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
748 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
749 ; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
750 ; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
751 ; SSE2-NEXT: [[TRUNC2:%.*]] = call double @llvm.trunc.f64(double [[LD2]])
752 ; SSE2-NEXT: [[TRUNC3:%.*]] = call double @llvm.trunc.f64(double [[LD3]])
753 ; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
754 ; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
755 ; SSE2-NEXT: store double [[TRUNC2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
756 ; SSE2-NEXT: store double [[TRUNC3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
757 ; SSE2-NEXT: ret void
759 ; SSE41-LABEL: @trunc_4f64(
760 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
761 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
762 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
763 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
764 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP3]])
765 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
766 ; SSE41-NEXT: ret void
768 ; AVX-LABEL: @trunc_4f64(
769 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
770 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
771 ; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
774 %ld0 = load double, ptr @src64, align 8
775 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
776 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
777 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
778 %trunc0 = call double @llvm.trunc.f64(double %ld0)
779 %trunc1 = call double @llvm.trunc.f64(double %ld1)
780 %trunc2 = call double @llvm.trunc.f64(double %ld2)
781 %trunc3 = call double @llvm.trunc.f64(double %ld3)
782 store double %trunc0, ptr @dst64, align 8
783 store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
784 store double %trunc2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
785 store double %trunc3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
789 define void @trunc_8f64() #0 {
790 ; SSE2-LABEL: @trunc_8f64(
791 ; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
792 ; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
793 ; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
794 ; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
795 ; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
796 ; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
797 ; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
798 ; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
799 ; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
800 ; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
801 ; SSE2-NEXT: [[TRUNC2:%.*]] = call double @llvm.trunc.f64(double [[LD2]])
802 ; SSE2-NEXT: [[TRUNC3:%.*]] = call double @llvm.trunc.f64(double [[LD3]])
803 ; SSE2-NEXT: [[TRUNC4:%.*]] = call double @llvm.trunc.f64(double [[LD4]])
804 ; SSE2-NEXT: [[TRUNC5:%.*]] = call double @llvm.trunc.f64(double [[LD5]])
805 ; SSE2-NEXT: [[TRUNC6:%.*]] = call double @llvm.trunc.f64(double [[LD6]])
806 ; SSE2-NEXT: [[TRUNC7:%.*]] = call double @llvm.trunc.f64(double [[LD7]])
807 ; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
808 ; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
809 ; SSE2-NEXT: store double [[TRUNC2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
810 ; SSE2-NEXT: store double [[TRUNC3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
811 ; SSE2-NEXT: store double [[TRUNC4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
812 ; SSE2-NEXT: store double [[TRUNC5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
813 ; SSE2-NEXT: store double [[TRUNC6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
814 ; SSE2-NEXT: store double [[TRUNC7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
815 ; SSE2-NEXT: ret void
817 ; SSE41-LABEL: @trunc_8f64(
818 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
819 ; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
820 ; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
821 ; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
822 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP3]])
823 ; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
824 ; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
825 ; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP5]])
826 ; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
827 ; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
828 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP7]])
829 ; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
830 ; SSE41-NEXT: ret void
832 ; AVX1-LABEL: @trunc_8f64(
833 ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
834 ; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
835 ; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
836 ; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
837 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP3]])
838 ; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
839 ; AVX1-NEXT: ret void
841 ; AVX2-LABEL: @trunc_8f64(
842 ; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
843 ; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
844 ; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
845 ; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
846 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP3]])
847 ; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
848 ; AVX2-NEXT: ret void
850 ; AVX512-LABEL: @trunc_8f64(
851 ; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
852 ; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.trunc.v8f64(<8 x double> [[TMP1]])
853 ; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
854 ; AVX512-NEXT: ret void
856 %ld0 = load double, ptr @src64, align 8
857 %ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
858 %ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
859 %ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
860 %ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
861 %ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
862 %ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
863 %ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
864 %trunc0 = call double @llvm.trunc.f64(double %ld0)
865 %trunc1 = call double @llvm.trunc.f64(double %ld1)
866 %trunc2 = call double @llvm.trunc.f64(double %ld2)
867 %trunc3 = call double @llvm.trunc.f64(double %ld3)
868 %trunc4 = call double @llvm.trunc.f64(double %ld4)
869 %trunc5 = call double @llvm.trunc.f64(double %ld5)
870 %trunc6 = call double @llvm.trunc.f64(double %ld6)
871 %trunc7 = call double @llvm.trunc.f64(double %ld7)
872 store double %trunc0, ptr @dst64, align 8
873 store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
874 store double %trunc2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
875 store double %trunc3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
876 store double %trunc4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
877 store double %trunc5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
878 store double %trunc6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
879 store double %trunc7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
883 define void @ceil_4f32() #0 {
884 ; SSE2-LABEL: @ceil_4f32(
885 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
886 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
887 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
888 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
889 ; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
890 ; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
891 ; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
892 ; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
893 ; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
894 ; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
895 ; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
896 ; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
897 ; SSE2-NEXT: ret void
899 ; SSE41-LABEL: @ceil_4f32(
900 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
901 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
902 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
903 ; SSE41-NEXT: ret void
905 ; AVX-LABEL: @ceil_4f32(
906 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
907 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
908 ; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
911 %ld0 = load float, ptr @src32, align 4
912 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
913 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
914 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
915 %ceil0 = call float @llvm.ceil.f32(float %ld0)
916 %ceil1 = call float @llvm.ceil.f32(float %ld1)
917 %ceil2 = call float @llvm.ceil.f32(float %ld2)
918 %ceil3 = call float @llvm.ceil.f32(float %ld3)
919 store float %ceil0, ptr @dst32, align 4
920 store float %ceil1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
921 store float %ceil2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
922 store float %ceil3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
926 define void @ceil_8f32() #0 {
927 ; SSE2-LABEL: @ceil_8f32(
928 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
929 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
930 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
931 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
932 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
933 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
934 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
935 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
936 ; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
937 ; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
938 ; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
939 ; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
940 ; SSE2-NEXT: [[CEIL4:%.*]] = call float @llvm.ceil.f32(float [[LD4]])
941 ; SSE2-NEXT: [[CEIL5:%.*]] = call float @llvm.ceil.f32(float [[LD5]])
942 ; SSE2-NEXT: [[CEIL6:%.*]] = call float @llvm.ceil.f32(float [[LD6]])
943 ; SSE2-NEXT: [[CEIL7:%.*]] = call float @llvm.ceil.f32(float [[LD7]])
944 ; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
945 ; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
946 ; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
947 ; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
948 ; SSE2-NEXT: store float [[CEIL4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
949 ; SSE2-NEXT: store float [[CEIL5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
950 ; SSE2-NEXT: store float [[CEIL6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
951 ; SSE2-NEXT: store float [[CEIL7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
952 ; SSE2-NEXT: ret void
954 ; SSE41-LABEL: @ceil_8f32(
955 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
956 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
957 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
958 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
959 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP3]])
960 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
961 ; SSE41-NEXT: ret void
963 ; AVX-LABEL: @ceil_8f32(
964 ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
965 ; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
966 ; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
969 %ld0 = load float, ptr @src32, align 4
970 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
971 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
972 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
973 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
974 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
975 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
976 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
977 %ceil0 = call float @llvm.ceil.f32(float %ld0)
978 %ceil1 = call float @llvm.ceil.f32(float %ld1)
979 %ceil2 = call float @llvm.ceil.f32(float %ld2)
980 %ceil3 = call float @llvm.ceil.f32(float %ld3)
981 %ceil4 = call float @llvm.ceil.f32(float %ld4)
982 %ceil5 = call float @llvm.ceil.f32(float %ld5)
983 %ceil6 = call float @llvm.ceil.f32(float %ld6)
984 %ceil7 = call float @llvm.ceil.f32(float %ld7)
985 store float %ceil0, ptr @dst32, align 4
986 store float %ceil1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
987 store float %ceil2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
988 store float %ceil3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
989 store float %ceil4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
990 store float %ceil5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
991 store float %ceil6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
992 store float %ceil7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
996 define void @ceil_16f32() #0 {
997 ; SSE2-LABEL: @ceil_16f32(
998 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
999 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1000 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1001 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1002 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1003 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1004 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1005 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1006 ; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1007 ; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
1008 ; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1009 ; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1010 ; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1011 ; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1012 ; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1013 ; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1014 ; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
1015 ; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
1016 ; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
1017 ; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
1018 ; SSE2-NEXT: [[CEIL4:%.*]] = call float @llvm.ceil.f32(float [[LD4]])
1019 ; SSE2-NEXT: [[CEIL5:%.*]] = call float @llvm.ceil.f32(float [[LD5]])
1020 ; SSE2-NEXT: [[CEIL6:%.*]] = call float @llvm.ceil.f32(float [[LD6]])
1021 ; SSE2-NEXT: [[CEIL7:%.*]] = call float @llvm.ceil.f32(float [[LD7]])
1022 ; SSE2-NEXT: [[CEIL8:%.*]] = call float @llvm.ceil.f32(float [[LD8]])
1023 ; SSE2-NEXT: [[CEIL9:%.*]] = call float @llvm.ceil.f32(float [[LD9]])
1024 ; SSE2-NEXT: [[CEIL10:%.*]] = call float @llvm.ceil.f32(float [[LD10]])
1025 ; SSE2-NEXT: [[CEIL11:%.*]] = call float @llvm.ceil.f32(float [[LD11]])
1026 ; SSE2-NEXT: [[CEIL12:%.*]] = call float @llvm.ceil.f32(float [[LD12]])
1027 ; SSE2-NEXT: [[CEIL13:%.*]] = call float @llvm.ceil.f32(float [[LD13]])
1028 ; SSE2-NEXT: [[CEIL14:%.*]] = call float @llvm.ceil.f32(float [[LD14]])
1029 ; SSE2-NEXT: [[CEIL15:%.*]] = call float @llvm.ceil.f32(float [[LD15]])
1030 ; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
1031 ; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1032 ; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1033 ; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1034 ; SSE2-NEXT: store float [[CEIL4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1035 ; SSE2-NEXT: store float [[CEIL5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1036 ; SSE2-NEXT: store float [[CEIL6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1037 ; SSE2-NEXT: store float [[CEIL7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1038 ; SSE2-NEXT: store float [[CEIL8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1039 ; SSE2-NEXT: store float [[CEIL9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
1040 ; SSE2-NEXT: store float [[CEIL10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1041 ; SSE2-NEXT: store float [[CEIL11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1042 ; SSE2-NEXT: store float [[CEIL12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1043 ; SSE2-NEXT: store float [[CEIL13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1044 ; SSE2-NEXT: store float [[CEIL14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1045 ; SSE2-NEXT: store float [[CEIL15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1046 ; SSE2-NEXT: ret void
1048 ; SSE41-LABEL: @ceil_16f32(
1049 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1050 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
1051 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1052 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1053 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP3]])
1054 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1055 ; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1056 ; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP5]])
1057 ; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1058 ; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1059 ; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP7]])
1060 ; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1061 ; SSE41-NEXT: ret void
1063 ; AVX1-LABEL: @ceil_16f32(
1064 ; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1065 ; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
1066 ; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1067 ; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1068 ; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP3]])
1069 ; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1070 ; AVX1-NEXT: ret void
1072 ; AVX2-LABEL: @ceil_16f32(
1073 ; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1074 ; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
1075 ; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1076 ; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1077 ; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP3]])
1078 ; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1079 ; AVX2-NEXT: ret void
1081 ; AVX512-LABEL: @ceil_16f32(
1082 ; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
1083 ; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.ceil.v16f32(<16 x float> [[TMP1]])
1084 ; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
1085 ; AVX512-NEXT: ret void
1087 %ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
1088 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
1089 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
1090 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
1091 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
1092 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
1093 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
1094 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
1095 %ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
1096 %ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
1097 %ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1098 %ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1099 %ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1100 %ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1101 %ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1102 %ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1103 %ceil0 = call float @llvm.ceil.f32(float %ld0 )
1104 %ceil1 = call float @llvm.ceil.f32(float %ld1 )
1105 %ceil2 = call float @llvm.ceil.f32(float %ld2 )
1106 %ceil3 = call float @llvm.ceil.f32(float %ld3 )
1107 %ceil4 = call float @llvm.ceil.f32(float %ld4 )
1108 %ceil5 = call float @llvm.ceil.f32(float %ld5 )
1109 %ceil6 = call float @llvm.ceil.f32(float %ld6 )
1110 %ceil7 = call float @llvm.ceil.f32(float %ld7 )
1111 %ceil8 = call float @llvm.ceil.f32(float %ld8 )
1112 %ceil9 = call float @llvm.ceil.f32(float %ld9 )
1113 %ceil10 = call float @llvm.ceil.f32(float %ld10)
1114 %ceil11 = call float @llvm.ceil.f32(float %ld11)
1115 %ceil12 = call float @llvm.ceil.f32(float %ld12)
1116 %ceil13 = call float @llvm.ceil.f32(float %ld13)
1117 %ceil14 = call float @llvm.ceil.f32(float %ld14)
1118 %ceil15 = call float @llvm.ceil.f32(float %ld15)
1119 store float %ceil0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
1120 store float %ceil1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
1121 store float %ceil2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
1122 store float %ceil3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
1123 store float %ceil4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
1124 store float %ceil5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
1125 store float %ceil6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
1126 store float %ceil7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
1127 store float %ceil8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
1128 store float %ceil9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
1129 store float %ceil10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1130 store float %ceil11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1131 store float %ceil12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1132 store float %ceil13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1133 store float %ceil14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1134 store float %ceil15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1138 define void @floor_4f32() #0 {
1139 ; SSE2-LABEL: @floor_4f32(
1140 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1141 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1142 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1143 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1144 ; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
1145 ; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
1146 ; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
1147 ; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
1148 ; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
1149 ; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1150 ; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1151 ; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1152 ; SSE2-NEXT: ret void
1154 ; SSE41-LABEL: @floor_4f32(
1155 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1156 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
1157 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1158 ; SSE41-NEXT: ret void
1160 ; AVX-LABEL: @floor_4f32(
1161 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1162 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
1163 ; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1164 ; AVX-NEXT: ret void
1166 %ld0 = load float, ptr @src32, align 4
1167 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1168 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1169 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1170 %floor0 = call float @llvm.floor.f32(float %ld0)
1171 %floor1 = call float @llvm.floor.f32(float %ld1)
1172 %floor2 = call float @llvm.floor.f32(float %ld2)
1173 %floor3 = call float @llvm.floor.f32(float %ld3)
1174 store float %floor0, ptr @dst32, align 4
1175 store float %floor1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1176 store float %floor2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1177 store float %floor3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1181 define void @floor_8f32() #0 {
1182 ; SSE2-LABEL: @floor_8f32(
1183 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1184 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1185 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1186 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1187 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1188 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1189 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1190 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1191 ; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
1192 ; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
1193 ; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
1194 ; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
1195 ; SSE2-NEXT: [[FLOOR4:%.*]] = call float @llvm.floor.f32(float [[LD4]])
1196 ; SSE2-NEXT: [[FLOOR5:%.*]] = call float @llvm.floor.f32(float [[LD5]])
1197 ; SSE2-NEXT: [[FLOOR6:%.*]] = call float @llvm.floor.f32(float [[LD6]])
1198 ; SSE2-NEXT: [[FLOOR7:%.*]] = call float @llvm.floor.f32(float [[LD7]])
1199 ; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
1200 ; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1201 ; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1202 ; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1203 ; SSE2-NEXT: store float [[FLOOR4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1204 ; SSE2-NEXT: store float [[FLOOR5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1205 ; SSE2-NEXT: store float [[FLOOR6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1206 ; SSE2-NEXT: store float [[FLOOR7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1207 ; SSE2-NEXT: ret void
1209 ; SSE41-LABEL: @floor_8f32(
1210 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1211 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
1212 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1213 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1214 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP3]])
1215 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1216 ; SSE41-NEXT: ret void
1218 ; AVX-LABEL: @floor_8f32(
1219 ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1220 ; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
1221 ; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1222 ; AVX-NEXT: ret void
1224 %ld0 = load float, ptr @src32, align 4
1225 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1226 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1227 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1228 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1229 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1230 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1231 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1232 %floor0 = call float @llvm.floor.f32(float %ld0)
1233 %floor1 = call float @llvm.floor.f32(float %ld1)
1234 %floor2 = call float @llvm.floor.f32(float %ld2)
1235 %floor3 = call float @llvm.floor.f32(float %ld3)
1236 %floor4 = call float @llvm.floor.f32(float %ld4)
1237 %floor5 = call float @llvm.floor.f32(float %ld5)
1238 %floor6 = call float @llvm.floor.f32(float %ld6)
1239 %floor7 = call float @llvm.floor.f32(float %ld7)
1240 store float %floor0, ptr @dst32, align 4
1241 store float %floor1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1242 store float %floor2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1243 store float %floor3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1244 store float %floor4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1245 store float %floor5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1246 store float %floor6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1247 store float %floor7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1251 define void @floor_16f32() #0 {
1252 ; SSE2-LABEL: @floor_16f32(
1253 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1254 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1255 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1256 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1257 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1258 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1259 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1260 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1261 ; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1262 ; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
1263 ; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1264 ; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1265 ; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1266 ; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1267 ; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1268 ; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1269 ; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
1270 ; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
1271 ; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
1272 ; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
1273 ; SSE2-NEXT: [[FLOOR4:%.*]] = call float @llvm.floor.f32(float [[LD4]])
1274 ; SSE2-NEXT: [[FLOOR5:%.*]] = call float @llvm.floor.f32(float [[LD5]])
1275 ; SSE2-NEXT: [[FLOOR6:%.*]] = call float @llvm.floor.f32(float [[LD6]])
1276 ; SSE2-NEXT: [[FLOOR7:%.*]] = call float @llvm.floor.f32(float [[LD7]])
1277 ; SSE2-NEXT: [[FLOOR8:%.*]] = call float @llvm.floor.f32(float [[LD8]])
1278 ; SSE2-NEXT: [[FLOOR9:%.*]] = call float @llvm.floor.f32(float [[LD9]])
1279 ; SSE2-NEXT: [[FLOOR10:%.*]] = call float @llvm.floor.f32(float [[LD10]])
1280 ; SSE2-NEXT: [[FLOOR11:%.*]] = call float @llvm.floor.f32(float [[LD11]])
1281 ; SSE2-NEXT: [[FLOOR12:%.*]] = call float @llvm.floor.f32(float [[LD12]])
1282 ; SSE2-NEXT: [[FLOOR13:%.*]] = call float @llvm.floor.f32(float [[LD13]])
1283 ; SSE2-NEXT: [[FLOOR14:%.*]] = call float @llvm.floor.f32(float [[LD14]])
1284 ; SSE2-NEXT: [[FLOOR15:%.*]] = call float @llvm.floor.f32(float [[LD15]])
1285 ; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
1286 ; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1287 ; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1288 ; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1289 ; SSE2-NEXT: store float [[FLOOR4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1290 ; SSE2-NEXT: store float [[FLOOR5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1291 ; SSE2-NEXT: store float [[FLOOR6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1292 ; SSE2-NEXT: store float [[FLOOR7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1293 ; SSE2-NEXT: store float [[FLOOR8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1294 ; SSE2-NEXT: store float [[FLOOR9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
1295 ; SSE2-NEXT: store float [[FLOOR10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1296 ; SSE2-NEXT: store float [[FLOOR11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1297 ; SSE2-NEXT: store float [[FLOOR12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1298 ; SSE2-NEXT: store float [[FLOOR13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1299 ; SSE2-NEXT: store float [[FLOOR14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1300 ; SSE2-NEXT: store float [[FLOOR15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1301 ; SSE2-NEXT: ret void
1303 ; SSE41-LABEL: @floor_16f32(
1304 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1305 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
1306 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1307 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1308 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP3]])
1309 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1310 ; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1311 ; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP5]])
1312 ; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1313 ; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1314 ; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP7]])
1315 ; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1316 ; SSE41-NEXT: ret void
1318 ; AVX1-LABEL: @floor_16f32(
1319 ; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1320 ; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
1321 ; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1322 ; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1323 ; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP3]])
1324 ; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1325 ; AVX1-NEXT: ret void
1327 ; AVX2-LABEL: @floor_16f32(
1328 ; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1329 ; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
1330 ; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1331 ; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1332 ; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP3]])
1333 ; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1334 ; AVX2-NEXT: ret void
1336 ; AVX512-LABEL: @floor_16f32(
1337 ; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
1338 ; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.floor.v16f32(<16 x float> [[TMP1]])
1339 ; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
1340 ; AVX512-NEXT: ret void
1342 %ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
1343 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
1344 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
1345 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
1346 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
1347 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
1348 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
1349 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
1350 %ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
1351 %ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
1352 %ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1353 %ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1354 %ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1355 %ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1356 %ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1357 %ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1358 %floor0 = call float @llvm.floor.f32(float %ld0 )
1359 %floor1 = call float @llvm.floor.f32(float %ld1 )
1360 %floor2 = call float @llvm.floor.f32(float %ld2 )
1361 %floor3 = call float @llvm.floor.f32(float %ld3 )
1362 %floor4 = call float @llvm.floor.f32(float %ld4 )
1363 %floor5 = call float @llvm.floor.f32(float %ld5 )
1364 %floor6 = call float @llvm.floor.f32(float %ld6 )
1365 %floor7 = call float @llvm.floor.f32(float %ld7 )
1366 %floor8 = call float @llvm.floor.f32(float %ld8 )
1367 %floor9 = call float @llvm.floor.f32(float %ld9 )
1368 %floor10 = call float @llvm.floor.f32(float %ld10)
1369 %floor11 = call float @llvm.floor.f32(float %ld11)
1370 %floor12 = call float @llvm.floor.f32(float %ld12)
1371 %floor13 = call float @llvm.floor.f32(float %ld13)
1372 %floor14 = call float @llvm.floor.f32(float %ld14)
1373 %floor15 = call float @llvm.floor.f32(float %ld15)
1374 store float %floor0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
1375 store float %floor1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
1376 store float %floor2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
1377 store float %floor3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
1378 store float %floor4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
1379 store float %floor5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
1380 store float %floor6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
1381 store float %floor7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
1382 store float %floor8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
1383 store float %floor9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
1384 store float %floor10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1385 store float %floor11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1386 store float %floor12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1387 store float %floor13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1388 store float %floor14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1389 store float %floor15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1393 define void @nearbyint_4f32() #0 {
1394 ; SSE2-LABEL: @nearbyint_4f32(
1395 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1396 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1397 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1398 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1399 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
1400 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
1401 ; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
1402 ; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
1403 ; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
1404 ; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1405 ; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1406 ; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1407 ; SSE2-NEXT: ret void
1409 ; SSE41-LABEL: @nearbyint_4f32(
1410 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1411 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
1412 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1413 ; SSE41-NEXT: ret void
1415 ; AVX-LABEL: @nearbyint_4f32(
1416 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1417 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
1418 ; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1419 ; AVX-NEXT: ret void
1421 %ld0 = load float, ptr @src32, align 4
1422 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1423 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1424 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1425 %nearbyint0 = call float @llvm.nearbyint.f32(float %ld0)
1426 %nearbyint1 = call float @llvm.nearbyint.f32(float %ld1)
1427 %nearbyint2 = call float @llvm.nearbyint.f32(float %ld2)
1428 %nearbyint3 = call float @llvm.nearbyint.f32(float %ld3)
1429 store float %nearbyint0, ptr @dst32, align 4
1430 store float %nearbyint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1431 store float %nearbyint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1432 store float %nearbyint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1436 define void @nearbyint_8f32() #0 {
1437 ; SSE2-LABEL: @nearbyint_8f32(
1438 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1439 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1440 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1441 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1442 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1443 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1444 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1445 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1446 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
1447 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
1448 ; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
1449 ; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
1450 ; SSE2-NEXT: [[NEARBYINT4:%.*]] = call float @llvm.nearbyint.f32(float [[LD4]])
1451 ; SSE2-NEXT: [[NEARBYINT5:%.*]] = call float @llvm.nearbyint.f32(float [[LD5]])
1452 ; SSE2-NEXT: [[NEARBYINT6:%.*]] = call float @llvm.nearbyint.f32(float [[LD6]])
1453 ; SSE2-NEXT: [[NEARBYINT7:%.*]] = call float @llvm.nearbyint.f32(float [[LD7]])
1454 ; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
1455 ; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1456 ; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1457 ; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1458 ; SSE2-NEXT: store float [[NEARBYINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1459 ; SSE2-NEXT: store float [[NEARBYINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1460 ; SSE2-NEXT: store float [[NEARBYINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1461 ; SSE2-NEXT: store float [[NEARBYINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1462 ; SSE2-NEXT: ret void
1464 ; SSE41-LABEL: @nearbyint_8f32(
1465 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1466 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
1467 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1468 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1469 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP3]])
1470 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1471 ; SSE41-NEXT: ret void
1473 ; AVX-LABEL: @nearbyint_8f32(
1474 ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1475 ; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
1476 ; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1477 ; AVX-NEXT: ret void
1479 %ld0 = load float, ptr @src32, align 4
1480 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1481 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1482 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1483 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1484 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1485 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1486 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1487 %nearbyint0 = call float @llvm.nearbyint.f32(float %ld0)
1488 %nearbyint1 = call float @llvm.nearbyint.f32(float %ld1)
1489 %nearbyint2 = call float @llvm.nearbyint.f32(float %ld2)
1490 %nearbyint3 = call float @llvm.nearbyint.f32(float %ld3)
1491 %nearbyint4 = call float @llvm.nearbyint.f32(float %ld4)
1492 %nearbyint5 = call float @llvm.nearbyint.f32(float %ld5)
1493 %nearbyint6 = call float @llvm.nearbyint.f32(float %ld6)
1494 %nearbyint7 = call float @llvm.nearbyint.f32(float %ld7)
1495 store float %nearbyint0, ptr @dst32, align 4
1496 store float %nearbyint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1497 store float %nearbyint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1498 store float %nearbyint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1499 store float %nearbyint4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1500 store float %nearbyint5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1501 store float %nearbyint6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1502 store float %nearbyint7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1506 define void @nearbyint_16f32() #0 {
1507 ; SSE2-LABEL: @nearbyint_16f32(
1508 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1509 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1510 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1511 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1512 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1513 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1514 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1515 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1516 ; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1517 ; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
1518 ; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1519 ; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1520 ; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1521 ; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1522 ; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1523 ; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1524 ; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
1525 ; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
1526 ; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
1527 ; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
1528 ; SSE2-NEXT: [[NEARBYINT4:%.*]] = call float @llvm.nearbyint.f32(float [[LD4]])
1529 ; SSE2-NEXT: [[NEARBYINT5:%.*]] = call float @llvm.nearbyint.f32(float [[LD5]])
1530 ; SSE2-NEXT: [[NEARBYINT6:%.*]] = call float @llvm.nearbyint.f32(float [[LD6]])
1531 ; SSE2-NEXT: [[NEARBYINT7:%.*]] = call float @llvm.nearbyint.f32(float [[LD7]])
1532 ; SSE2-NEXT: [[NEARBYINT8:%.*]] = call float @llvm.nearbyint.f32(float [[LD8]])
1533 ; SSE2-NEXT: [[NEARBYINT9:%.*]] = call float @llvm.nearbyint.f32(float [[LD9]])
1534 ; SSE2-NEXT: [[NEARBYINT10:%.*]] = call float @llvm.nearbyint.f32(float [[LD10]])
1535 ; SSE2-NEXT: [[NEARBYINT11:%.*]] = call float @llvm.nearbyint.f32(float [[LD11]])
1536 ; SSE2-NEXT: [[NEARBYINT12:%.*]] = call float @llvm.nearbyint.f32(float [[LD12]])
1537 ; SSE2-NEXT: [[NEARBYINT13:%.*]] = call float @llvm.nearbyint.f32(float [[LD13]])
1538 ; SSE2-NEXT: [[NEARBYINT14:%.*]] = call float @llvm.nearbyint.f32(float [[LD14]])
1539 ; SSE2-NEXT: [[NEARBYINT15:%.*]] = call float @llvm.nearbyint.f32(float [[LD15]])
1540 ; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
1541 ; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1542 ; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1543 ; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1544 ; SSE2-NEXT: store float [[NEARBYINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1545 ; SSE2-NEXT: store float [[NEARBYINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1546 ; SSE2-NEXT: store float [[NEARBYINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1547 ; SSE2-NEXT: store float [[NEARBYINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1548 ; SSE2-NEXT: store float [[NEARBYINT8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1549 ; SSE2-NEXT: store float [[NEARBYINT9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
1550 ; SSE2-NEXT: store float [[NEARBYINT10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1551 ; SSE2-NEXT: store float [[NEARBYINT11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1552 ; SSE2-NEXT: store float [[NEARBYINT12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1553 ; SSE2-NEXT: store float [[NEARBYINT13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1554 ; SSE2-NEXT: store float [[NEARBYINT14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1555 ; SSE2-NEXT: store float [[NEARBYINT15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1556 ; SSE2-NEXT: ret void
1558 ; SSE41-LABEL: @nearbyint_16f32(
1559 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1560 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
1561 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1562 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1563 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP3]])
1564 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1565 ; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1566 ; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP5]])
1567 ; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1568 ; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1569 ; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP7]])
1570 ; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1571 ; SSE41-NEXT: ret void
1573 ; AVX1-LABEL: @nearbyint_16f32(
1574 ; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1575 ; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
1576 ; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1577 ; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1578 ; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP3]])
1579 ; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1580 ; AVX1-NEXT: ret void
1582 ; AVX2-LABEL: @nearbyint_16f32(
1583 ; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1584 ; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
1585 ; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1586 ; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1587 ; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP3]])
1588 ; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1589 ; AVX2-NEXT: ret void
1591 ; AVX512-LABEL: @nearbyint_16f32(
1592 ; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
1593 ; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> [[TMP1]])
1594 ; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
1595 ; AVX512-NEXT: ret void
1597 %ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
1598 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
1599 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
1600 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
1601 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
1602 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
1603 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
1604 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
1605 %ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
1606 %ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
1607 %ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1608 %ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1609 %ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1610 %ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1611 %ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1612 %ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1613 %nearbyint0 = call float @llvm.nearbyint.f32(float %ld0 )
1614 %nearbyint1 = call float @llvm.nearbyint.f32(float %ld1 )
1615 %nearbyint2 = call float @llvm.nearbyint.f32(float %ld2 )
1616 %nearbyint3 = call float @llvm.nearbyint.f32(float %ld3 )
1617 %nearbyint4 = call float @llvm.nearbyint.f32(float %ld4 )
1618 %nearbyint5 = call float @llvm.nearbyint.f32(float %ld5 )
1619 %nearbyint6 = call float @llvm.nearbyint.f32(float %ld6 )
1620 %nearbyint7 = call float @llvm.nearbyint.f32(float %ld7 )
1621 %nearbyint8 = call float @llvm.nearbyint.f32(float %ld8 )
1622 %nearbyint9 = call float @llvm.nearbyint.f32(float %ld9 )
1623 %nearbyint10 = call float @llvm.nearbyint.f32(float %ld10)
1624 %nearbyint11 = call float @llvm.nearbyint.f32(float %ld11)
1625 %nearbyint12 = call float @llvm.nearbyint.f32(float %ld12)
1626 %nearbyint13 = call float @llvm.nearbyint.f32(float %ld13)
1627 %nearbyint14 = call float @llvm.nearbyint.f32(float %ld14)
1628 %nearbyint15 = call float @llvm.nearbyint.f32(float %ld15)
1629 store float %nearbyint0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
1630 store float %nearbyint1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
1631 store float %nearbyint2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
1632 store float %nearbyint3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
1633 store float %nearbyint4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
1634 store float %nearbyint5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
1635 store float %nearbyint6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
1636 store float %nearbyint7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
1637 store float %nearbyint8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
1638 store float %nearbyint9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
1639 store float %nearbyint10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1640 store float %nearbyint11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1641 store float %nearbyint12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1642 store float %nearbyint13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1643 store float %nearbyint14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1644 store float %nearbyint15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1648 define void @rint_4f32() #0 {
1649 ; SSE2-LABEL: @rint_4f32(
1650 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1651 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1652 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1653 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1654 ; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
1655 ; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
1656 ; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
1657 ; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
1658 ; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
1659 ; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1660 ; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1661 ; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1662 ; SSE2-NEXT: ret void
1664 ; SSE41-LABEL: @rint_4f32(
1665 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1666 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
1667 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1668 ; SSE41-NEXT: ret void
1670 ; AVX-LABEL: @rint_4f32(
1671 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1672 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
1673 ; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1674 ; AVX-NEXT: ret void
1676 %ld0 = load float, ptr @src32, align 4
1677 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1678 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1679 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1680 %rint0 = call float @llvm.rint.f32(float %ld0)
1681 %rint1 = call float @llvm.rint.f32(float %ld1)
1682 %rint2 = call float @llvm.rint.f32(float %ld2)
1683 %rint3 = call float @llvm.rint.f32(float %ld3)
1684 store float %rint0, ptr @dst32, align 4
1685 store float %rint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1686 store float %rint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1687 store float %rint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1691 define void @rint_8f32() #0 {
1692 ; SSE2-LABEL: @rint_8f32(
1693 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1694 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1695 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1696 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1697 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1698 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1699 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1700 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1701 ; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
1702 ; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
1703 ; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
1704 ; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
1705 ; SSE2-NEXT: [[RINT4:%.*]] = call float @llvm.rint.f32(float [[LD4]])
1706 ; SSE2-NEXT: [[RINT5:%.*]] = call float @llvm.rint.f32(float [[LD5]])
1707 ; SSE2-NEXT: [[RINT6:%.*]] = call float @llvm.rint.f32(float [[LD6]])
1708 ; SSE2-NEXT: [[RINT7:%.*]] = call float @llvm.rint.f32(float [[LD7]])
1709 ; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
1710 ; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1711 ; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1712 ; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1713 ; SSE2-NEXT: store float [[RINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1714 ; SSE2-NEXT: store float [[RINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1715 ; SSE2-NEXT: store float [[RINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1716 ; SSE2-NEXT: store float [[RINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1717 ; SSE2-NEXT: ret void
1719 ; SSE41-LABEL: @rint_8f32(
1720 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1721 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
1722 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1723 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1724 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP3]])
1725 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1726 ; SSE41-NEXT: ret void
1728 ; AVX-LABEL: @rint_8f32(
1729 ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1730 ; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
1731 ; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1732 ; AVX-NEXT: ret void
1734 %ld0 = load float, ptr @src32, align 4
1735 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1736 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1737 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1738 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1739 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1740 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1741 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1742 %rint0 = call float @llvm.rint.f32(float %ld0)
1743 %rint1 = call float @llvm.rint.f32(float %ld1)
1744 %rint2 = call float @llvm.rint.f32(float %ld2)
1745 %rint3 = call float @llvm.rint.f32(float %ld3)
1746 %rint4 = call float @llvm.rint.f32(float %ld4)
1747 %rint5 = call float @llvm.rint.f32(float %ld5)
1748 %rint6 = call float @llvm.rint.f32(float %ld6)
1749 %rint7 = call float @llvm.rint.f32(float %ld7)
1750 store float %rint0, ptr @dst32, align 4
1751 store float %rint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1752 store float %rint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1753 store float %rint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1754 store float %rint4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1755 store float %rint5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1756 store float %rint6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1757 store float %rint7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1761 define void @rint_16f32() #0 {
1762 ; SSE2-LABEL: @rint_16f32(
1763 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1764 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1765 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1766 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1767 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1768 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1769 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1770 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1771 ; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1772 ; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
1773 ; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1774 ; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1775 ; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1776 ; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1777 ; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1778 ; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1779 ; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
1780 ; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
1781 ; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
1782 ; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
1783 ; SSE2-NEXT: [[RINT4:%.*]] = call float @llvm.rint.f32(float [[LD4]])
1784 ; SSE2-NEXT: [[RINT5:%.*]] = call float @llvm.rint.f32(float [[LD5]])
1785 ; SSE2-NEXT: [[RINT6:%.*]] = call float @llvm.rint.f32(float [[LD6]])
1786 ; SSE2-NEXT: [[RINT7:%.*]] = call float @llvm.rint.f32(float [[LD7]])
1787 ; SSE2-NEXT: [[RINT8:%.*]] = call float @llvm.rint.f32(float [[LD8]])
1788 ; SSE2-NEXT: [[RINT9:%.*]] = call float @llvm.rint.f32(float [[LD9]])
1789 ; SSE2-NEXT: [[RINT10:%.*]] = call float @llvm.rint.f32(float [[LD10]])
1790 ; SSE2-NEXT: [[RINT11:%.*]] = call float @llvm.rint.f32(float [[LD11]])
1791 ; SSE2-NEXT: [[RINT12:%.*]] = call float @llvm.rint.f32(float [[LD12]])
1792 ; SSE2-NEXT: [[RINT13:%.*]] = call float @llvm.rint.f32(float [[LD13]])
1793 ; SSE2-NEXT: [[RINT14:%.*]] = call float @llvm.rint.f32(float [[LD14]])
1794 ; SSE2-NEXT: [[RINT15:%.*]] = call float @llvm.rint.f32(float [[LD15]])
1795 ; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
1796 ; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1797 ; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1798 ; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1799 ; SSE2-NEXT: store float [[RINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1800 ; SSE2-NEXT: store float [[RINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1801 ; SSE2-NEXT: store float [[RINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1802 ; SSE2-NEXT: store float [[RINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1803 ; SSE2-NEXT: store float [[RINT8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1804 ; SSE2-NEXT: store float [[RINT9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
1805 ; SSE2-NEXT: store float [[RINT10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1806 ; SSE2-NEXT: store float [[RINT11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1807 ; SSE2-NEXT: store float [[RINT12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1808 ; SSE2-NEXT: store float [[RINT13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1809 ; SSE2-NEXT: store float [[RINT14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1810 ; SSE2-NEXT: store float [[RINT15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1811 ; SSE2-NEXT: ret void
1813 ; SSE41-LABEL: @rint_16f32(
1814 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1815 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
1816 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1817 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1818 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP3]])
1819 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1820 ; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1821 ; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP5]])
1822 ; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1823 ; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1824 ; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP7]])
1825 ; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1826 ; SSE41-NEXT: ret void
1828 ; AVX1-LABEL: @rint_16f32(
1829 ; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1830 ; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
1831 ; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1832 ; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1833 ; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP3]])
1834 ; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1835 ; AVX1-NEXT: ret void
1837 ; AVX2-LABEL: @rint_16f32(
1838 ; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1839 ; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
1840 ; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1841 ; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
1842 ; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP3]])
1843 ; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
1844 ; AVX2-NEXT: ret void
1846 ; AVX512-LABEL: @rint_16f32(
1847 ; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
1848 ; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.rint.v16f32(<16 x float> [[TMP1]])
1849 ; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
1850 ; AVX512-NEXT: ret void
1852 %ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
1853 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
1854 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
1855 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
1856 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
1857 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
1858 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
1859 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
1860 %ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
1861 %ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
1862 %ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
1863 %ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
1864 %ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
1865 %ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
1866 %ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
1867 %ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
1868 %rint0 = call float @llvm.rint.f32(float %ld0 )
1869 %rint1 = call float @llvm.rint.f32(float %ld1 )
1870 %rint2 = call float @llvm.rint.f32(float %ld2 )
1871 %rint3 = call float @llvm.rint.f32(float %ld3 )
1872 %rint4 = call float @llvm.rint.f32(float %ld4 )
1873 %rint5 = call float @llvm.rint.f32(float %ld5 )
1874 %rint6 = call float @llvm.rint.f32(float %ld6 )
1875 %rint7 = call float @llvm.rint.f32(float %ld7 )
1876 %rint8 = call float @llvm.rint.f32(float %ld8 )
1877 %rint9 = call float @llvm.rint.f32(float %ld9 )
1878 %rint10 = call float @llvm.rint.f32(float %ld10)
1879 %rint11 = call float @llvm.rint.f32(float %ld11)
1880 %rint12 = call float @llvm.rint.f32(float %ld12)
1881 %rint13 = call float @llvm.rint.f32(float %ld13)
1882 %rint14 = call float @llvm.rint.f32(float %ld14)
1883 %rint15 = call float @llvm.rint.f32(float %ld15)
1884 store float %rint0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
1885 store float %rint1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
1886 store float %rint2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
1887 store float %rint3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
1888 store float %rint4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
1889 store float %rint5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
1890 store float %rint6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
1891 store float %rint7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
1892 store float %rint8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
1893 store float %rint9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
1894 store float %rint10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
1895 store float %rint11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
1896 store float %rint12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
1897 store float %rint13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
1898 store float %rint14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
1899 store float %rint15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
1903 define void @trunc_4f32() #0 {
1904 ; SSE2-LABEL: @trunc_4f32(
1905 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1906 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1907 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1908 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1909 ; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
1910 ; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
1911 ; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
1912 ; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
1913 ; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
1914 ; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1915 ; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1916 ; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1917 ; SSE2-NEXT: ret void
1919 ; SSE41-LABEL: @trunc_4f32(
1920 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1921 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
1922 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1923 ; SSE41-NEXT: ret void
1925 ; AVX-LABEL: @trunc_4f32(
1926 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1927 ; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
1928 ; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1929 ; AVX-NEXT: ret void
1931 %ld0 = load float, ptr @src32, align 4
1932 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1933 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1934 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1935 %trunc0 = call float @llvm.trunc.f32(float %ld0)
1936 %trunc1 = call float @llvm.trunc.f32(float %ld1)
1937 %trunc2 = call float @llvm.trunc.f32(float %ld2)
1938 %trunc3 = call float @llvm.trunc.f32(float %ld3)
1939 store float %trunc0, ptr @dst32, align 4
1940 store float %trunc1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1941 store float %trunc2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1942 store float %trunc3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1946 define void @trunc_8f32() #0 {
1947 ; SSE2-LABEL: @trunc_8f32(
1948 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
1949 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1950 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1951 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1952 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1953 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1954 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1955 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1956 ; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
1957 ; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
1958 ; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
1959 ; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
1960 ; SSE2-NEXT: [[TRUNC4:%.*]] = call float @llvm.trunc.f32(float [[LD4]])
1961 ; SSE2-NEXT: [[TRUNC5:%.*]] = call float @llvm.trunc.f32(float [[LD5]])
1962 ; SSE2-NEXT: [[TRUNC6:%.*]] = call float @llvm.trunc.f32(float [[LD6]])
1963 ; SSE2-NEXT: [[TRUNC7:%.*]] = call float @llvm.trunc.f32(float [[LD7]])
1964 ; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
1965 ; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
1966 ; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
1967 ; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
1968 ; SSE2-NEXT: store float [[TRUNC4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1969 ; SSE2-NEXT: store float [[TRUNC5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
1970 ; SSE2-NEXT: store float [[TRUNC6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
1971 ; SSE2-NEXT: store float [[TRUNC7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
1972 ; SSE2-NEXT: ret void
1974 ; SSE41-LABEL: @trunc_8f32(
1975 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
1976 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
1977 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
1978 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1979 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP3]])
1980 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
1981 ; SSE41-NEXT: ret void
1983 ; AVX-LABEL: @trunc_8f32(
1984 ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
1985 ; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
1986 ; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
1987 ; AVX-NEXT: ret void
1989 %ld0 = load float, ptr @src32, align 4
1990 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
1991 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
1992 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
1993 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
1994 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
1995 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
1996 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
1997 %trunc0 = call float @llvm.trunc.f32(float %ld0)
1998 %trunc1 = call float @llvm.trunc.f32(float %ld1)
1999 %trunc2 = call float @llvm.trunc.f32(float %ld2)
2000 %trunc3 = call float @llvm.trunc.f32(float %ld3)
2001 %trunc4 = call float @llvm.trunc.f32(float %ld4)
2002 %trunc5 = call float @llvm.trunc.f32(float %ld5)
2003 %trunc6 = call float @llvm.trunc.f32(float %ld6)
2004 %trunc7 = call float @llvm.trunc.f32(float %ld7)
2005 store float %trunc0, ptr @dst32, align 4
2006 store float %trunc1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
2007 store float %trunc2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
2008 store float %trunc3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
2009 store float %trunc4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
2010 store float %trunc5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
2011 store float %trunc6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
2012 store float %trunc7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
2016 define void @trunc_16f32() #0 {
2017 ; SSE2-LABEL: @trunc_16f32(
2018 ; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
2019 ; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
2020 ; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
2021 ; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
2022 ; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
2023 ; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
2024 ; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
2025 ; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
2026 ; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
2027 ; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
2028 ; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
2029 ; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
2030 ; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
2031 ; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
2032 ; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
2033 ; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
2034 ; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
2035 ; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
2036 ; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
2037 ; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
2038 ; SSE2-NEXT: [[TRUNC4:%.*]] = call float @llvm.trunc.f32(float [[LD4]])
2039 ; SSE2-NEXT: [[TRUNC5:%.*]] = call float @llvm.trunc.f32(float [[LD5]])
2040 ; SSE2-NEXT: [[TRUNC6:%.*]] = call float @llvm.trunc.f32(float [[LD6]])
2041 ; SSE2-NEXT: [[TRUNC7:%.*]] = call float @llvm.trunc.f32(float [[LD7]])
2042 ; SSE2-NEXT: [[TRUNC8:%.*]] = call float @llvm.trunc.f32(float [[LD8]])
2043 ; SSE2-NEXT: [[TRUNC9:%.*]] = call float @llvm.trunc.f32(float [[LD9]])
2044 ; SSE2-NEXT: [[TRUNC10:%.*]] = call float @llvm.trunc.f32(float [[LD10]])
2045 ; SSE2-NEXT: [[TRUNC11:%.*]] = call float @llvm.trunc.f32(float [[LD11]])
2046 ; SSE2-NEXT: [[TRUNC12:%.*]] = call float @llvm.trunc.f32(float [[LD12]])
2047 ; SSE2-NEXT: [[TRUNC13:%.*]] = call float @llvm.trunc.f32(float [[LD13]])
2048 ; SSE2-NEXT: [[TRUNC14:%.*]] = call float @llvm.trunc.f32(float [[LD14]])
2049 ; SSE2-NEXT: [[TRUNC15:%.*]] = call float @llvm.trunc.f32(float [[LD15]])
2050 ; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
2051 ; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
2052 ; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
2053 ; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
2054 ; SSE2-NEXT: store float [[TRUNC4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
2055 ; SSE2-NEXT: store float [[TRUNC5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
2056 ; SSE2-NEXT: store float [[TRUNC6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
2057 ; SSE2-NEXT: store float [[TRUNC7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
2058 ; SSE2-NEXT: store float [[TRUNC8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
2059 ; SSE2-NEXT: store float [[TRUNC9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
2060 ; SSE2-NEXT: store float [[TRUNC10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
2061 ; SSE2-NEXT: store float [[TRUNC11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
2062 ; SSE2-NEXT: store float [[TRUNC12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
2063 ; SSE2-NEXT: store float [[TRUNC13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
2064 ; SSE2-NEXT: store float [[TRUNC14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
2065 ; SSE2-NEXT: store float [[TRUNC15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
2066 ; SSE2-NEXT: ret void
2068 ; SSE41-LABEL: @trunc_16f32(
2069 ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
2070 ; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
2071 ; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
2072 ; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
2073 ; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP3]])
2074 ; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
2075 ; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
2076 ; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP5]])
2077 ; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
2078 ; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
2079 ; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP7]])
2080 ; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
2081 ; SSE41-NEXT: ret void
2083 ; AVX1-LABEL: @trunc_16f32(
2084 ; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
2085 ; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
2086 ; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
2087 ; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
2088 ; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP3]])
2089 ; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
2090 ; AVX1-NEXT: ret void
2092 ; AVX2-LABEL: @trunc_16f32(
2093 ; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
2094 ; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
2095 ; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
2096 ; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
2097 ; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP3]])
2098 ; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
2099 ; AVX2-NEXT: ret void
2101 ; AVX512-LABEL: @trunc_16f32(
2102 ; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
2103 ; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.trunc.v16f32(<16 x float> [[TMP1]])
2104 ; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
2105 ; AVX512-NEXT: ret void
2107 %ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
2108 %ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
2109 %ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
2110 %ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
2111 %ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
2112 %ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
2113 %ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
2114 %ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
2115 %ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
2116 %ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
2117 %ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
2118 %ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
2119 %ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
2120 %ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
2121 %ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
2122 %ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
2123 %trunc0 = call float @llvm.trunc.f32(float %ld0 )
2124 %trunc1 = call float @llvm.trunc.f32(float %ld1 )
2125 %trunc2 = call float @llvm.trunc.f32(float %ld2 )
2126 %trunc3 = call float @llvm.trunc.f32(float %ld3 )
2127 %trunc4 = call float @llvm.trunc.f32(float %ld4 )
2128 %trunc5 = call float @llvm.trunc.f32(float %ld5 )
2129 %trunc6 = call float @llvm.trunc.f32(float %ld6 )
2130 %trunc7 = call float @llvm.trunc.f32(float %ld7 )
2131 %trunc8 = call float @llvm.trunc.f32(float %ld8 )
2132 %trunc9 = call float @llvm.trunc.f32(float %ld9 )
2133 %trunc10 = call float @llvm.trunc.f32(float %ld10)
2134 %trunc11 = call float @llvm.trunc.f32(float %ld11)
2135 %trunc12 = call float @llvm.trunc.f32(float %ld12)
2136 %trunc13 = call float @llvm.trunc.f32(float %ld13)
2137 %trunc14 = call float @llvm.trunc.f32(float %ld14)
2138 %trunc15 = call float @llvm.trunc.f32(float %ld15)
2139 store float %trunc0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
2140 store float %trunc1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
2141 store float %trunc2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
2142 store float %trunc3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
2143 store float %trunc4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
2144 store float %trunc5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
2145 store float %trunc6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
2146 store float %trunc7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
2147 store float %trunc8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
2148 store float %trunc9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
2149 store float %trunc10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
2150 store float %trunc11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
2151 store float %trunc12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
2152 store float %trunc13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
2153 store float %trunc14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
2154 store float %trunc15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
2158 attributes #0 = { nounwind }