; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,-slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=FAST32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=SLOW32
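
; Both RUN lines enable AVX. The first models a target where unaligned
; 32-byte memory accesses are fast (FAST32), the second one where they are
; slow (SLOW32); checks common to both configurations use the ALL prefix.

; Two adjacent scalar float loads should be merged into a single 8-byte
; vmovsd load.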
define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
; ALL-LABEL: merge_2_floats:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT:    retq
  %tmp1 = load float, float* %p
  %vecins = insertelement <4 x float> undef, float %tmp1, i32 0
  %add.ptr = getelementptr float, float* %p, i32 1
  %tmp5 = load float, float* %add.ptr
  %vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
  ret <4 x float> %vecins7
}

; Test-case generated due to a crash when trying to treat loading the first
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
; ALL-LABEL: merge_2_floats_into_4:
; ALL:       # %bb.0:
; ALL-NEXT:    movq (%rax), %rax
; ALL-NEXT:    vmovups (%rax), %xmm0
; ALL-NEXT:    retq
  %1 = load i64*, i64** undef, align 8
  %2 = getelementptr inbounds i64, i64* %1, i64 0
  %3 = load i64, i64* %2
  %4 = insertelement <4 x i64> undef, i64 %3, i32 0
  %5 = load i64*, i64** undef, align 8
  %6 = getelementptr inbounds i64, i64* %5, i64 1
  %7 = load i64, i64* %6
  %8 = insertelement <4 x i64> %4, i64 %7, i32 1
  %9 = shufflevector <4 x i64> %8, <4 x i64> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i64> %9
}
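
; Four consecutive float loads should be merged into a single 16-byte
; vmovups; 16-byte unaligned loads are acceptable on both configurations,
; so the ALL prefix covers this case.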
define <4 x float> @merge_4_floats(float* %ptr) {
; ALL-LABEL: merge_4_floats:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovups (%rdi), %xmm0
; ALL-NEXT:    retq
  %a = load float, float* %ptr, align 8
  %vec = insertelement <4 x float> undef, float %a, i32 0
  %idx1 = getelementptr inbounds float, float* %ptr, i64 1
  %b = load float, float* %idx1, align 8
  %vec2 = insertelement <4 x float> %vec, float %b, i32 1
  %idx3 = getelementptr inbounds float, float* %ptr, i64 2
  %c = load float, float* %idx3, align 8
  %vec4 = insertelement <4 x float> %vec2, float %c, i32 2
  %idx5 = getelementptr inbounds float, float* %ptr, i64 3
  %d = load float, float* %idx5, align 8
  %vec6 = insertelement <4 x float> %vec4, float %d, i32 3
  ret <4 x float> %vec6
}

; PR21710 ( http://llvm.org/bugs/show_bug.cgi?id=21710 )
; Make sure that 32-byte vectors are handled efficiently.
; If the target has slow 32-byte accesses, we should still generate
; a merged load, but split it into a 16-byte load plus a vinsertf128
; of the upper 16 bytes rather than eight separate scalar loads.
define <8 x float> @merge_8_floats(float* %ptr) {
; FAST32-LABEL: merge_8_floats:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups (%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_8_floats:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups (%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %a = load float, float* %ptr, align 4
  %vec = insertelement <8 x float> undef, float %a, i32 0
  %idx1 = getelementptr inbounds float, float* %ptr, i64 1
  %b = load float, float* %idx1, align 4
  %vec2 = insertelement <8 x float> %vec, float %b, i32 1
  %idx3 = getelementptr inbounds float, float* %ptr, i64 2
  %c = load float, float* %idx3, align 4
  %vec4 = insertelement <8 x float> %vec2, float %c, i32 2
  %idx5 = getelementptr inbounds float, float* %ptr, i64 3
  %d = load float, float* %idx5, align 4
  %vec6 = insertelement <8 x float> %vec4, float %d, i32 3
  %idx7 = getelementptr inbounds float, float* %ptr, i64 4
  %e = load float, float* %idx7, align 4
  %vec8 = insertelement <8 x float> %vec6, float %e, i32 4
  %idx9 = getelementptr inbounds float, float* %ptr, i64 5
  %f = load float, float* %idx9, align 4
  %vec10 = insertelement <8 x float> %vec8, float %f, i32 5
  %idx11 = getelementptr inbounds float, float* %ptr, i64 6
  %g = load float, float* %idx11, align 4
  %vec12 = insertelement <8 x float> %vec10, float %g, i32 6
  %idx13 = getelementptr inbounds float, float* %ptr, i64 7
  %h = load float, float* %idx13, align 4
  %vec14 = insertelement <8 x float> %vec12, float %h, i32 7
  ret <8 x float> %vec14
}
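
; The same 32-byte policy applies to a <4 x double> assembled from four
; consecutive double loads: one ymm load when fast, two 16-byte halves
; when slow.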
define <4 x double> @merge_4_doubles(double* %ptr) {
; FAST32-LABEL: merge_4_doubles:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups (%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_4_doubles:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups (%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %a = load double, double* %ptr, align 8
  %vec = insertelement <4 x double> undef, double %a, i32 0
  %idx1 = getelementptr inbounds double, double* %ptr, i64 1
  %b = load double, double* %idx1, align 8
  %vec2 = insertelement <4 x double> %vec, double %b, i32 1
  %idx3 = getelementptr inbounds double, double* %ptr, i64 2
  %c = load double, double* %idx3, align 8
  %vec4 = insertelement <4 x double> %vec2, double %c, i32 2
  %idx5 = getelementptr inbounds double, double* %ptr, i64 3
  %d = load double, double* %idx5, align 8
  %vec6 = insertelement <4 x double> %vec4, double %d, i32 3
  ret <4 x double> %vec6
}

; PR21771 ( http://llvm.org/bugs/show_bug.cgi?id=21771 )
; Recognize and combine consecutive loads even when the
; first of the combined loads is offset from the base address.
define <4 x double> @merge_4_doubles_offset(double* %ptr) {
; FAST32-LABEL: merge_4_doubles_offset:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups 32(%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_4_doubles_offset:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups 32(%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 4
  %arrayidx5 = getelementptr inbounds double, double* %ptr, i64 5
  %arrayidx6 = getelementptr inbounds double, double* %ptr, i64 6
  %arrayidx7 = getelementptr inbounds double, double* %ptr, i64 7
  %e = load double, double* %arrayidx4, align 8
  %f = load double, double* %arrayidx5, align 8
  %g = load double, double* %arrayidx6, align 8
  %h = load double, double* %arrayidx7, align 8
  %vecinit4 = insertelement <4 x double> undef, double %e, i32 0
  %vecinit5 = insertelement <4 x double> %vecinit4, double %f, i32 1
  %vecinit6 = insertelement <4 x double> %vecinit5, double %g, i32 2
  %vecinit7 = insertelement <4 x double> %vecinit6, double %h, i32 3
  ret <4 x double> %vecinit7
}
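
; If codegen changes, regenerate the CHECK lines with
; utils/update_llc_test_checks.py rather than editing them by hand.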