; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="e" | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="e" | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="E" | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="E" | FileCheck %s --check-prefixes=CHECK,AVX
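; The same IR is checked with both little-endian ("e") and big-endian ("E") data
; layouts, and with the SSE2 vs. AVX2 cost models (SSE/AVX check prefixes).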
;-------------------------------------------------------------------------------
; Here we know we can load 128 bits as per dereferenceability and alignment.
; We don't widen scalar loads per se.
define float @scalar(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @scalar(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
; We don't widen single-element loads; these get scalarized.
define <1 x float> @vec_with_1elt(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_1elt(
; CHECK-NEXT:    [[R:%.*]] = load <1 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <1 x float> [[R]]
;
  %r = load <1 x float>, ptr %p, align 16
  ret <1 x float> %r
}
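; Sub-width (64-bit) load where a full 128 bits are known dereferenceable and aligned.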
define <2 x float> @vec_with_2elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_2elts(
; CHECK-NEXT:    [[R:%.*]] = load <2 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %r = load <2 x float>, ptr %p, align 16
  ret <2 x float> %r
}
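; Sub-width (96-bit) load where a full 128 bits are known dereferenceable and aligned.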
define <3 x float> @vec_with_3elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_3elts(
; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <3 x float> [[R]]
;
  %r = load <3 x float>, ptr %p, align 16
  ret <3 x float> %r
}
; Full-vector load. All good already.
define <4 x float> @vec_with_4elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_4elts(
; CHECK-NEXT:    [[R:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %r = load <4 x float>, ptr %p, align 16
  ret <4 x float> %r
}
; We don't know we can load 256 bits though.
define <5 x float> @vec_with_5elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_5elts(
; CHECK-NEXT:    [[R:%.*]] = load <5 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <5 x float> [[R]]
;
  %r = load <5 x float>, ptr %p, align 16
  ret <5 x float> %r
}
;-------------------------------------------------------------------------------

; We can load 128 bits, and the fact that it's underaligned isn't relevant.
define <3 x float> @vec_with_3elts_underaligned(ptr align 8 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_3elts_underaligned(
; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    ret <3 x float> [[R]]
;
  %r = load <3 x float>, ptr %p, align 8
  ret <3 x float> %r
}
; We don't know we can load 128 bits from dereferenceability alone, but since the
; pointer is sufficiently aligned, a wide load would still be safe.
; FIXME: this should still get widened.
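; (A 16-byte load from a 16-byte-aligned pointer cannot cross a page boundary,
; so touching the bytes past the dereferenceable(12) region cannot fault.)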
define <3 x float> @vec_with_3elts_underdereferenceable(ptr align 16 dereferenceable(12) %p) {
; CHECK-LABEL: @vec_with_3elts_underdereferenceable(
; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <3 x float> [[R]]
;
  %r = load <3 x float>, ptr %p, align 16
  ret <3 x float> %r
}
; We can't tell if we can load 128 bits.
define <3 x float> @vec_with_3elts_underaligned_underdereferenceable(ptr align 8 dereferenceable(12) %p) {
; CHECK-LABEL: @vec_with_3elts_underaligned_underdereferenceable(
; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    ret <3 x float> [[R]]
;
  %r = load <3 x float>, ptr %p, align 8
  ret <3 x float> %r
}
;-------------------------------------------------------------------------------
; Here we know we can load 256 bits as per dereferenceability and alignment.
define <1 x float> @vec_with_1elt_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_1elt_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <1 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <1 x float> [[R]]
;
  %r = load <1 x float>, ptr %p, align 32
  ret <1 x float> %r
}

define <2 x float> @vec_with_2elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_2elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <2 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %r = load <2 x float>, ptr %p, align 32
  ret <2 x float> %r
}

define <3 x float> @vec_with_3elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_3elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <3 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <3 x float> [[R]]
;
  %r = load <3 x float>, ptr %p, align 32
  ret <3 x float> %r
}

define <4 x float> @vec_with_4elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_4elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <4 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <4 x float> [[R]]
;
  %r = load <4 x float>, ptr %p, align 32
  ret <4 x float> %r
}

define <5 x float> @vec_with_5elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_5elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <5 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <5 x float> [[R]]
;
  %r = load <5 x float>, ptr %p, align 32
  ret <5 x float> %r
}

define <6 x float> @vec_with_6elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_6elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <6 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <6 x float> [[R]]
;
  %r = load <6 x float>, ptr %p, align 32
  ret <6 x float> %r
}

define <7 x float> @vec_with_7elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_7elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <7 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <7 x float> [[R]]
;
  %r = load <7 x float>, ptr %p, align 32
  ret <7 x float> %r
}
; Full-vector load. All good already.
define <8 x float> @vec_with_8elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_8elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <8 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <8 x float> [[R]]
;
  %r = load <8 x float>, ptr %p, align 32
  ret <8 x float> %r
}
; We can't tell if we can load more than 256 bits.
define <9 x float> @vec_with_9elts_256bits(ptr align 32 dereferenceable(32) %p) {
; CHECK-LABEL: @vec_with_9elts_256bits(
; CHECK-NEXT:    [[R:%.*]] = load <9 x float>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    ret <9 x float> [[R]]
;
  %r = load <9 x float>, ptr %p, align 32
  ret <9 x float> %r
}
;-------------------------------------------------------------------------------

; Weird types we don't deal with
define <2 x i7> @vec_with_two_subbyte_elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_two_subbyte_elts(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i7>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i7> [[R]]
;
  %r = load <2 x i7>, ptr %p, align 16
  ret <2 x i7> %r
}
define <2 x i9> @vec_with_two_nonbyte_sized_elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_two_nonbyte_sized_elts(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i9>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i9> [[R]]
;
  %r = load <2 x i9>, ptr %p, align 16
  ret <2 x i9> %r
}
define <2 x i24> @vec_with_two_nonpoweroftwo_sized_elts(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_two_nonpoweroftwo_sized_elts(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i24>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i24> [[R]]
;
  %r = load <2 x i24>, ptr %p, align 16
  ret <2 x i24> %r
}
define <2 x float> @vec_with_2elts_addressspace(ptr addrspace(2) align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_2elts_addressspace(
; CHECK-NEXT:    [[R:%.*]] = load <2 x float>, ptr addrspace(2) [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %r = load <2 x float>, ptr addrspace(2) %p, align 16
  ret <2 x float> %r
}
;-------------------------------------------------------------------------------

; Widening these would change the legalized type, so leave them alone.
define <2 x i1> @vec_with_2elts_128bits_i1(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_2elts_128bits_i1(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i1>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %r = load <2 x i1>, ptr %p, align 16
  ret <2 x i1> %r
}
define <2 x i2> @vec_with_2elts_128bits_i2(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_2elts_128bits_i2(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i2>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i2> [[R]]
;
  %r = load <2 x i2>, ptr %p, align 16
  ret <2 x i2> %r
}
define <2 x i4> @vec_with_2elts_128bits_i4(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @vec_with_2elts_128bits_i4(
; CHECK-NEXT:    [[R:%.*]] = load <2 x i4>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <2 x i4> [[R]]
;
  %r = load <2 x i4>, ptr %p, align 16
  ret <2 x i4> %r
}
; Load the 128-bit vector because there is no additional cost.

define <4 x float> @load_v1f32_v4f32(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v1f32_v4f32(
; CHECK-NEXT:    [[S:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <4 x float> [[S]]
;
  %l = load <1 x float>, ptr %p, align 16
  %s = shufflevector <1 x float> %l, <1 x float> poison, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  ret <4 x float> %s
}
; Load the 128-bit vector because there is no additional cost.
; Alignment is taken from param attr.

define <4 x float> @load_v2f32_v4f32(ptr align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @load_v2f32_v4f32(
; CHECK-NEXT:    [[S:%.*]] = load <4 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret <4 x float> [[S]]
;
  %l = load <2 x float>, ptr %p, align 1
  %s = shufflevector <2 x float> %l, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ret <4 x float> %s
}
; Load the 128-bit vector because there is no additional cost.

define <4 x float> @load_v3f32_v4f32(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v3f32_v4f32(
; CHECK-NEXT:    [[S:%.*]] = load <4 x float>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret <4 x float> [[S]]
;
  %l = load <3 x float>, ptr %p, align 1
  %s = shufflevector <3 x float> %l, <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  ret <4 x float> %s
}
; Negative test - the shuffle must be a simple subvector insert.

define <4 x float> @load_v3f32_v4f32_wrong_mask(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v3f32_v4f32_wrong_mask(
; CHECK-NEXT:    [[L:%.*]] = load <3 x float>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[S:%.*]] = shufflevector <3 x float> [[L]], <3 x float> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[S]]
;
  %l = load <3 x float>, ptr %p, align 1
  %s = shufflevector <3 x float> %l, <3 x float> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 undef>
  ret <4 x float> %s
}
; Negative test - must be dereferenceable to vector width.

define <4 x float> @load_v3f32_v4f32_not_deref(ptr dereferenceable(15) %p) {
; CHECK-LABEL: @load_v3f32_v4f32_not_deref(
; CHECK-NEXT:    [[L:%.*]] = load <3 x float>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[S:%.*]] = shufflevector <3 x float> [[L]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
; CHECK-NEXT:    ret <4 x float> [[S]]
;
  %l = load <3 x float>, ptr %p, align 16
  %s = shufflevector <3 x float> %l, <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  ret <4 x float> %s
}
; Without AVX, the cost of loading 256-bits would be greater.

define <8 x float> @load_v2f32_v8f32(ptr dereferenceable(32) %p) {
; SSE-LABEL: @load_v2f32_v8f32(
; SSE-NEXT:    [[L:%.*]] = load <2 x float>, ptr [[P:%.*]], align 1
; SSE-NEXT:    [[S:%.*]] = shufflevector <2 x float> [[L]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; SSE-NEXT:    ret <8 x float> [[S]]
;
; AVX-LABEL: @load_v2f32_v8f32(
; AVX-NEXT:    [[S:%.*]] = load <8 x float>, ptr [[P:%.*]], align 1
; AVX-NEXT:    ret <8 x float> [[S]]
;
  %l = load <2 x float>, ptr %p, align 1
  %s = shufflevector <2 x float> %l, <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  ret <8 x float> %s
}
; Integer type is ok too.

define <4 x i32> @load_v2i32_v4i32(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v2i32_v4i32(
; CHECK-NEXT:    [[S:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret <4 x i32> [[S]]
;
  %l = load <2 x i32>, ptr %p, align 1
  %s = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  ret <4 x i32> %s
}
; TODO: We assume the shuffle mask is canonical; this non-canonical mask is not
; recognized as a subvector insert.

define <4 x i32> @load_v2i32_v4i32_non_canonical_mask(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v2i32_v4i32_non_canonical_mask(
; CHECK-NEXT:    [[L:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[S:%.*]] = shufflevector <2 x i32> [[L]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
; CHECK-NEXT:    ret <4 x i32> [[S]]
;
  %l = load <2 x i32>, ptr %p, align 1
  %s = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  ret <4 x i32> %s
}
; Allow non-canonical commuted shuffle.

define <4 x i32> @load_v2i32_v4i32_non_canonical_mask_commute(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @load_v2i32_v4i32_non_canonical_mask_commute(
; CHECK-NEXT:    [[S:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret <4 x i32> [[S]]
;
  %l = load <2 x i32>, ptr %p, align 1
  %s = shufflevector <2 x i32> poison, <2 x i32> %l, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  ret <4 x i32> %s
}
; The wide load must be in the same addrspace as the original load.

define <4 x i32> @load_v2i32_v4i32_addrspacecast(ptr addrspace(5) align 16 dereferenceable(16) %p) {
; CHECK-LABEL: @load_v2i32_v4i32_addrspacecast(
; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[P:%.*]] to ptr addrspace(42)
; CHECK-NEXT:    [[S:%.*]] = load <4 x i32>, ptr addrspace(42) [[TMP1]], align 16
; CHECK-NEXT:    ret <4 x i32> [[S]]
;
  %asc = addrspacecast ptr addrspace(5) %p to ptr addrspace(42)
  %l = load <2 x i32>, ptr addrspace(42) %asc, align 4
  %s = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ret <4 x i32> %s
}