; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX2

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
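
; The scalar-load tests that follow (through @larger_fp_scalar_256bit_vec) are
; not transformed: their autogenerated CHECK lines match the input IR exactly.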

define float @matching_fp_scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @matching_fp_scalar(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define float @matching_fp_scalar_volatile(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @matching_fp_scalar_volatile(
; CHECK-NEXT:    [[R:%.*]] = load volatile float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load volatile float, ptr %p, align 16
  ret float %r
}

define double @larger_fp_scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @larger_fp_scalar(
; CHECK-NEXT:    [[R:%.*]] = load double, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret double [[R]]
;
  %r = load double, ptr %p, align 16
  ret double %r
}

define float @smaller_fp_scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_fp_scalar(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define float @matching_fp_vector(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @matching_fp_vector(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define float @matching_fp_vector_gep00(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @matching_fp_vector_gep00(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define float @matching_fp_vector_gep01(ptr align 16 dereferenceable(20) %p) {
; CHECK-LABEL: @matching_fp_vector_gep01(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[P:%.*]], i64 0, i64 1
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    ret float [[R]]
;
  %gep = getelementptr inbounds <4 x float>, ptr %p, i64 0, i64 1
  %r = load float, ptr %gep, align 4
  ret float %r
}

define float @matching_fp_vector_gep01_deref(ptr align 16 dereferenceable(19) %p) {
; CHECK-LABEL: @matching_fp_vector_gep01_deref(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[P:%.*]], i64 0, i64 1
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    ret float [[R]]
;
  %gep = getelementptr inbounds <4 x float>, ptr %p, i64 0, i64 1
  %r = load float, ptr %gep, align 4
  ret float %r
}

define float @matching_fp_vector_gep10(ptr align 16 dereferenceable(32) %p) {
; CHECK-LABEL: @matching_fp_vector_gep10(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[GEP]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %gep = getelementptr inbounds <4 x float>, ptr %p, i64 1, i64 0
  %r = load float, ptr %gep, align 16
  ret float %r
}

define float @matching_fp_vector_gep10_deref(ptr align 16 dereferenceable(31) %p) {
; CHECK-LABEL: @matching_fp_vector_gep10_deref(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[GEP]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %gep = getelementptr inbounds <4 x float>, ptr %p, i64 1, i64 0
  %r = load float, ptr %gep, align 16
  ret float %r
}

define float @nonmatching_int_vector(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @nonmatching_int_vector(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define double @less_aligned(ptr align 4 dereferenceable(16) %p) {
; CHECK-LABEL: @less_aligned(
; CHECK-NEXT:    [[R:%.*]] = load double, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret double [[R]]
;
  %r = load double, ptr %p, align 4
  ret double %r
}

define float @matching_fp_scalar_small_deref(ptr align 16 dereferenceable(15) %p) {
; CHECK-LABEL: @matching_fp_scalar_small_deref(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}

define i64 @larger_int_scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @larger_int_scalar(
; CHECK-NEXT:    [[R:%.*]] = load i64, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret i64 [[R]]
;
  %r = load i64, ptr %p, align 16
  ret i64 %r
}

define i8 @smaller_int_scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_int_scalar(
; CHECK-NEXT:    [[R:%.*]] = load i8, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = load i8, ptr %p, align 16
  ret i8 %r
}

define double @larger_fp_scalar_256bit_vec(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @larger_fp_scalar_256bit_vec(
; CHECK-NEXT:    [[R:%.*]] = load double, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret double [[R]]
;
  %r = load double, ptr %p, align 32
  ret double %r
}
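
; From here on, a scalar load that feeds an insertelement into lane 0 can be
; widened into a full vector load plus a shufflevector when enough
; dereferenceable bytes are known, as the CHECK lines below show.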

define <4 x float> @load_f32_insert_v4f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

define <4 x float> @casted_load_f32_insert_v4f32(ptr align 4 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @casted_load_f32_insert_v4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

; Element type does not change cost.

define <4 x i32> @load_i32_insert_v4i32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_i32_insert_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %s = load i32, ptr %p, align 4
  %r = insertelement <4 x i32> undef, i32 %s, i32 0
  ret <4 x i32> %r
}

; Pointer type does not change cost.

define <4 x i32> @casted_load_i32_insert_v4i32(ptr align 4 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @casted_load_i32_insert_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %s = load i32, ptr %p, align 4
  %r = insertelement <4 x i32> undef, i32 %s, i32 0
  ret <4 x i32> %r
}

; This is canonical form for vector element access.

define <4 x float> @gep00_load_f32_insert_v4f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @gep00_load_f32_insert_v4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr %p, align 16
  %r = insertelement <4 x float> undef, float %s, i64 0
  ret <4 x float> %r
}

; Should work with addrspace as well.

define <4 x float> @gep00_load_f32_insert_v4f32_addrspace(ptr addrspace(44) align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @gep00_load_f32_insert_v4f32_addrspace(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr addrspace(44) [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr addrspace(44) %p, align 16
  %r = insertelement <4 x float> undef, float %s, i64 0
  ret <4 x float> %r
}

; If there are enough dereferenceable bytes, we can offset the vector load.

define <8 x i16> @gep01_load_i16_insert_v8i16(ptr align 16 dereferenceable(18) %p) nofree nosync {
; CHECK-LABEL: @gep01_load_i16_insert_v8i16(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 0, i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[GEP]], align 2
; CHECK-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 0, i64 1
  %s = load i16, ptr %gep, align 2
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Can't safely load the offset vector, but can load+shuffle if it is profitable.

define <8 x i16> @gep01_load_i16_insert_v8i16_deref(ptr align 16 dereferenceable(17) %p) nofree nosync {
; SSE2-LABEL: @gep01_load_i16_insert_v8i16_deref(
; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 0, i64 1
; SSE2-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 2
; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; SSE2-NEXT:    ret <8 x i16> [[R]]
;
; AVX2-LABEL: @gep01_load_i16_insert_v8i16_deref(
; AVX2-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 16
; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; AVX2-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 0, i64 1
  %s = load i16, ptr %gep, align 2
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Verify that alignment of the new load is not over-specified.

define <8 x i16> @gep01_load_i16_insert_v8i16_deref_minalign(ptr align 2 dereferenceable(16) %p) nofree nosync {
; SSE2-LABEL: @gep01_load_i16_insert_v8i16_deref_minalign(
; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 0, i64 1
; SSE2-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 8
; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; SSE2-NEXT:    ret <8 x i16> [[R]]
;
; AVX2-LABEL: @gep01_load_i16_insert_v8i16_deref_minalign(
; AVX2-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 2
; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; AVX2-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 0, i64 1
  %s = load i16, ptr %gep, align 8
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Negative test - if we are shuffling a load from the base pointer, the address offset
; must be a multiple of element size.
; TODO: Could bitcast around this limitation.

define <4 x i32> @gep01_bitcast_load_i32_insert_v4i32(ptr align 1 dereferenceable(16) %p) {
; CHECK-LABEL: @gep01_bitcast_load_i32_insert_v4i32(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <16 x i8>, ptr [[P:%.*]], i64 0, i64 1
; CHECK-NEXT:    [[S:%.*]] = load i32, ptr [[GEP]], align 1
; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x i32> undef, i32 [[S]], i64 0
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %gep = getelementptr inbounds <16 x i8>, ptr %p, i64 0, i64 1
  %s = load i32, ptr %gep, align 1
  %r = insertelement <4 x i32> undef, i32 %s, i64 0
  ret <4 x i32> %r
}
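
; An offset of 12 bytes is a multiple of the i32 element size, so this load
; can be folded into a vector load of the base pointer plus a shuffle of
; element 3.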

define <4 x i32> @gep012_bitcast_load_i32_insert_v4i32(ptr align 1 dereferenceable(20) %p) nofree nosync {
; CHECK-LABEL: @gep012_bitcast_load_i32_insert_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %gep = getelementptr inbounds <16 x i8>, ptr %p, i64 0, i64 12
  %s = load i32, ptr %gep, align 1
  %r = insertelement <4 x i32> undef, i32 %s, i64 0
  ret <4 x i32> %r
}

; Negative test - if we are shuffling a load from the base pointer, the address offset
; must be a multiple of element size and the offset must be low enough to fit in the vector
; (bitcasting would not help this case).

define <4 x i32> @gep013_bitcast_load_i32_insert_v4i32(ptr align 1 dereferenceable(20) %p) nofree nosync {
; CHECK-LABEL: @gep013_bitcast_load_i32_insert_v4i32(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <16 x i8>, ptr [[P:%.*]], i64 0, i64 13
; CHECK-NEXT:    [[S:%.*]] = load i32, ptr [[GEP]], align 1
; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x i32> undef, i32 [[S]], i64 0
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %gep = getelementptr inbounds <16 x i8>, ptr %p, i64 0, i64 13
  %s = load i32, ptr %gep, align 1
  %r = insertelement <4 x i32> undef, i32 %s, i64 0
  ret <4 x i32> %r
}

; If there are enough dereferenceable bytes, we can offset the vector load.

define <8 x i16> @gep10_load_i16_insert_v8i16(ptr align 16 dereferenceable(32) %p) nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Negative test - disable under asan because widened load can cause spurious
; use-after-poison issues when __asan_poison_memory_region is used.

define <8 x i16> @gep10_load_i16_insert_v8i16_asan(ptr align 16 dereferenceable(32) %p) sanitize_address nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16_asan(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; hwasan and memtag should be similarly suppressed.

define <8 x i16> @gep10_load_i16_insert_v8i16_hwasan(ptr align 16 dereferenceable(32) %p) sanitize_hwaddress nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16_hwasan(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

define <8 x i16> @gep10_load_i16_insert_v8i16_memtag(ptr align 16 dereferenceable(32) %p) sanitize_memtag nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16_memtag(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Negative test - disable under tsan because widened load may overlap bytes
; being concurrently modified. tsan does not know that some bytes are undef.

define <8 x i16> @gep10_load_i16_insert_v8i16_tsan(ptr align 16 dereferenceable(32) %p) sanitize_thread nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16_tsan(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Negative test - can't safely load the offset vector, but could load+shuffle.

define <8 x i16> @gep10_load_i16_insert_v8i16_deref(ptr align 16 dereferenceable(31) %p) nofree nosync {
; CHECK-LABEL: @gep10_load_i16_insert_v8i16_deref(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, ptr [[P:%.*]], i64 1, i64 0
; CHECK-NEXT:    [[S:%.*]] = load i16, ptr [[GEP]], align 16
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; CHECK-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <8 x i16>, ptr %p, i64 1, i64 0
  %s = load i16, ptr %gep, align 16
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; Negative test - do not alter volatile.

define <4 x float> @load_f32_insert_v4f32_volatile(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v4f32_volatile(
; CHECK-NEXT:    [[S:%.*]] = load volatile float, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x float> undef, float [[S]], i32 0
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load volatile float, ptr %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

; Pointer is not as aligned as load, but that's ok.
; The new load uses the larger alignment value.

define <4 x float> @load_f32_insert_v4f32_align(ptr align 1 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v4f32_align(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

; Negative test - not enough bytes.

define <4 x float> @load_f32_insert_v4f32_deref(ptr align 4 dereferenceable(15) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v4f32_deref(
; CHECK-NEXT:    [[S:%.*]] = load float, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x float> undef, float [[S]], i32 0
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}
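
; In the next tests, the widened load is <4 x i32>/<4 x float> rather than the
; full result width, presumably because only 16 bytes are known dereferenceable;
; the shuffle then produces the wider (or narrower) result vector.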

define <8 x i32> @load_i32_insert_v8i32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_i32_insert_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <8 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <8 x i32> [[R]]
;
  %s = load i32, ptr %p, align 4
  %r = insertelement <8 x i32> undef, i32 %s, i32 0
  ret <8 x i32> %r
}

define <8 x i32> @casted_load_i32_insert_v8i32(ptr align 4 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @casted_load_i32_insert_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <8 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <8 x i32> [[R]]
;
  %s = load i32, ptr %p, align 4
  %r = insertelement <8 x i32> undef, i32 %s, i32 0
  ret <8 x i32> %r
}

define <16 x float> @load_f32_insert_v16f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v16f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <16 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <16 x float> undef, float %s, i32 0
  ret <16 x float> %r
}

define <2 x float> @load_f32_insert_v2f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_f32_insert_v2f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <2 x i32> <i32 0, i32 poison>
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <2 x float> undef, float %s, i32 0
  ret <2 x float> %r
}

; Negative test - suppress load widening for asan/hwasan/memtag/tsan.

define <2 x float> @load_f32_insert_v2f32_asan(ptr align 16 dereferenceable(16) %p) sanitize_address {
; CHECK-LABEL: @load_f32_insert_v2f32_asan(
; CHECK-NEXT:    [[S:%.*]] = load float, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = insertelement <2 x float> undef, float [[S]], i32 0
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %s = load float, ptr %p, align 4
  %r = insertelement <2 x float> undef, float %s, i32 0
  ret <2 x float> %r
}
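
; PR47558: the scalar load of %scale feeds two insertelement uses, and the
; whole sequence is left unchanged.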

declare ptr @getscaleptr()
define void @PR47558_multiple_use_load(ptr nocapture nonnull %resultptr, ptr nocapture nonnull readonly %opptr) {
; CHECK-LABEL: @PR47558_multiple_use_load(
; CHECK-NEXT:    [[SCALEPTR:%.*]] = tail call nonnull align 16 dereferenceable(64) ptr @getscaleptr()
; CHECK-NEXT:    [[OP:%.*]] = load <2 x float>, ptr [[OPPTR:%.*]], align 4
; CHECK-NEXT:    [[SCALE:%.*]] = load float, ptr [[SCALEPTR]], align 16
; CHECK-NEXT:    [[T1:%.*]] = insertelement <2 x float> undef, float [[SCALE]], i32 0
; CHECK-NEXT:    [[T2:%.*]] = insertelement <2 x float> [[T1]], float [[SCALE]], i32 1
; CHECK-NEXT:    [[T3:%.*]] = fmul <2 x float> [[OP]], [[T2]]
; CHECK-NEXT:    [[T4:%.*]] = extractelement <2 x float> [[T3]], i32 0
; CHECK-NEXT:    [[RESULT0:%.*]] = insertelement <2 x float> undef, float [[T4]], i32 0
; CHECK-NEXT:    [[T5:%.*]] = extractelement <2 x float> [[T3]], i32 1
; CHECK-NEXT:    [[RESULT1:%.*]] = insertelement <2 x float> [[RESULT0]], float [[T5]], i32 1
; CHECK-NEXT:    store <2 x float> [[RESULT1]], ptr [[RESULTPTR:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %scaleptr = tail call nonnull align 16 dereferenceable(64) ptr @getscaleptr()
  %op = load <2 x float>, ptr %opptr, align 4
  %scale = load float, ptr %scaleptr, align 16
  %t1 = insertelement <2 x float> undef, float %scale, i32 0
  %t2 = insertelement <2 x float> %t1, float %scale, i32 1
  %t3 = fmul <2 x float> %op, %t2
  %t4 = extractelement <2 x float> %t3, i32 0
  %result0 = insertelement <2 x float> undef, float %t4, i32 0
  %t5 = extractelement <2 x float> %t3, i32 1
  %result1 = insertelement <2 x float> %result0, float %t5, i32 1
  store <2 x float> %result1, ptr %resultptr, align 8
  ret void
}
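
; A vector load feeding extractelement of lane 0 plus insertelement can also be
; converted to a full vector load plus shuffle, whether the loaded vector is
; narrower or wider than the result, as the next two tests show.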

define <4 x float> @load_v2f32_extract_insert_v4f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_v2f32_extract_insert_v4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %l = load <2 x float>, ptr %p, align 4
  %s = extractelement <2 x float> %l, i32 0
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

define <4 x float> @load_v8f32_extract_insert_v4f32(ptr align 16 dereferenceable(16) %p) nofree nosync {
; CHECK-LABEL: @load_v8f32_extract_insert_v4f32(
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %l = load <8 x float>, ptr %p, align 4
  %s = extractelement <8 x float> %l, i32 0
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}
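
; Negative test - the loaded vector has an extra use (the store), so the
; extract+insert sequence is not replaced.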

define <8 x i32> @load_v1i32_extract_insert_v8i32_extra_use(ptr align 16 dereferenceable(16) %p, ptr %store_ptr) nofree nosync {
; CHECK-LABEL: @load_v1i32_extract_insert_v8i32_extra_use(
; CHECK-NEXT:    [[L:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store <1 x i32> [[L]], ptr [[STORE_PTR:%.*]], align 4
; CHECK-NEXT:    [[S:%.*]] = extractelement <1 x i32> [[L]], i32 0
; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i32> undef, i32 [[S]], i32 0
; CHECK-NEXT:    ret <8 x i32> [[R]]
;
  %l = load <1 x i32>, ptr %p, align 4
  store <1 x i32> %l, ptr %store_ptr
  %s = extractelement <1 x i32> %l, i32 0
  %r = insertelement <8 x i32> undef, i32 %s, i32 0
  ret <8 x i32> %r
}

; Can't safely load the offset vector, but can load+shuffle if it is profitable.

define <8 x i16> @gep1_load_v2i16_extract_insert_v8i16(ptr align 1 dereferenceable(16) %p) nofree nosync {
; SSE2-LABEL: @gep1_load_v2i16_extract_insert_v8i16(
; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <2 x i16>, ptr [[P:%.*]], i64 1
; SSE2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds <2 x i16>, ptr [[GEP]], i32 0, i32 0
; SSE2-NEXT:    [[S:%.*]] = load i16, ptr [[TMP1]], align 8
; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
; SSE2-NEXT:    ret <8 x i16> [[R]]
;
; AVX2-LABEL: @gep1_load_v2i16_extract_insert_v8i16(
; AVX2-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 4
; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 2, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; AVX2-NEXT:    ret <8 x i16> [[R]]
;
  %gep = getelementptr inbounds <2 x i16>, ptr %p, i64 1
  %l = load <2 x i16>, ptr %gep, align 8
  %s = extractelement <2 x i16> %l, i32 0
  %r = insertelement <8 x i16> undef, i16 %s, i64 0
  ret <8 x i16> %r
}

; PR30986 - split vector loads for scalarized operations
define <2 x i64> @PR30986(ptr %0) {
; CHECK-LABEL: @PR30986(
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds <2 x i64>, ptr [[TMP0:%.*]], i32 0, i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 16
; CHECK-NEXT:    [[TMP4:%.*]] = tail call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> undef, i64 [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds <2 x i64>, ptr [[TMP0]], i32 0, i32 1
; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = tail call i64 @llvm.ctpop.i64(i64 [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[TMP5]], i64 [[TMP8]], i32 1
; CHECK-NEXT:    ret <2 x i64> [[TMP9]]
;
  %2 = load <2 x i64>, ptr %0, align 16
  %3 = extractelement <2 x i64> %2, i32 0
  %4 = tail call i64 @llvm.ctpop.i64(i64 %3)
  %5 = insertelement <2 x i64> undef, i64 %4, i32 0
  %6 = extractelement <2 x i64> %2, i32 1
  %7 = tail call i64 @llvm.ctpop.i64(i64 %6)
  %8 = insertelement <2 x i64> %5, i64 %7, i32 1
  ret <2 x i64> %8
}
declare i64 @llvm.ctpop.i64(i64)