; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,-slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=FAST32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=SLOW32
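
; These tests check that consecutive scalar loads are merged into single
; vector loads. Both RUN lines enable AVX; FAST32 covers targets where
; unaligned 32-byte memory accesses are fast, and SLOW32 covers targets
; where they are slow and are expected to be split into 16-byte halves.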
define <4 x float> @merge_2_floats(ptr nocapture %p) nounwind readonly {
; ALL-LABEL: merge_2_floats:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT:    retq
  %tmp1 = load float, ptr %p
  %vecins = insertelement <4 x float> undef, float %tmp1, i32 0
  %add.ptr = getelementptr float, ptr %p, i32 1
  %tmp5 = load float, ptr %add.ptr
  %vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
  ret <4 x float> %vecins7
}

; Test-case generated due to a crash when trying to treat loading the first
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
; ALL-LABEL: merge_2_floats_into_4:
; ALL:       # %bb.0:
; ALL-NEXT:    movq (%rax), %rax
; ALL-NEXT:    vmovups (%rax), %xmm0
; ALL-NEXT:    retq
  %1 = load ptr, ptr undef, align 8
  %2 = load i64, ptr %1, align 8
  %3 = insertelement <4 x i64> undef, i64 %2, i32 0
  %4 = load ptr, ptr undef, align 8
  %5 = getelementptr inbounds i64, ptr %4, i64 1
  %6 = load i64, ptr %5, align 8
  %7 = insertelement <4 x i64> %3, i64 %6, i32 1
  %8 = shufflevector <4 x i64> %7, <4 x i64> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i64> %8
}
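
; Note: merge_2_floats_into_4 above needs only a 16-byte vmovups even though
; it returns a <4 x i64>: the shufflevector leaves the high two lanes undef,
; so just the two merged i64 loads (16 bytes) must be materialized.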
define <4 x float> @merge_4_floats(ptr %ptr) {
; ALL-LABEL: merge_4_floats:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovups (%rdi), %xmm0
; ALL-NEXT:    retq
  %a = load float, ptr %ptr, align 8
  %vec = insertelement <4 x float> undef, float %a, i32 0
  %idx1 = getelementptr inbounds float, ptr %ptr, i64 1
  %b = load float, ptr %idx1, align 8
  %vec2 = insertelement <4 x float> %vec, float %b, i32 1
  %idx3 = getelementptr inbounds float, ptr %ptr, i64 2
  %c = load float, ptr %idx3, align 8
  %vec4 = insertelement <4 x float> %vec2, float %c, i32 2
  %idx5 = getelementptr inbounds float, ptr %ptr, i64 3
  %d = load float, ptr %idx5, align 8
  %vec6 = insertelement <4 x float> %vec4, float %d, i32 3
  ret <4 x float> %vec6
}

; PR21710 ( http://llvm.org/bugs/show_bug.cgi?id=21710 )
; Make sure that 32-byte vectors are handled efficiently.
; If the target has slow 32-byte accesses, we should still generate
; the wide vector, but its load should be split into two 16-byte
; accesses (a vmovups plus a vinsertf128 of the high half).
define <8 x float> @merge_8_floats(ptr %ptr) {
; FAST32-LABEL: merge_8_floats:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups (%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_8_floats:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups (%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %a = load float, ptr %ptr, align 4
  %vec = insertelement <8 x float> undef, float %a, i32 0
  %idx1 = getelementptr inbounds float, ptr %ptr, i64 1
  %b = load float, ptr %idx1, align 4
  %vec2 = insertelement <8 x float> %vec, float %b, i32 1
  %idx3 = getelementptr inbounds float, ptr %ptr, i64 2
  %c = load float, ptr %idx3, align 4
  %vec4 = insertelement <8 x float> %vec2, float %c, i32 2
  %idx5 = getelementptr inbounds float, ptr %ptr, i64 3
  %d = load float, ptr %idx5, align 4
  %vec6 = insertelement <8 x float> %vec4, float %d, i32 3
  %idx7 = getelementptr inbounds float, ptr %ptr, i64 4
  %e = load float, ptr %idx7, align 4
  %vec8 = insertelement <8 x float> %vec6, float %e, i32 4
  %idx9 = getelementptr inbounds float, ptr %ptr, i64 5
  %f = load float, ptr %idx9, align 4
  %vec10 = insertelement <8 x float> %vec8, float %f, i32 5
  %idx11 = getelementptr inbounds float, ptr %ptr, i64 6
  %g = load float, ptr %idx11, align 4
  %vec12 = insertelement <8 x float> %vec10, float %g, i32 6
  %idx13 = getelementptr inbounds float, ptr %ptr, i64 7
  %h = load float, ptr %idx13, align 4
  %vec14 = insertelement <8 x float> %vec12, float %h, i32 7
  ret <8 x float> %vec14
}
define <4 x double> @merge_4_doubles(ptr %ptr) {
; FAST32-LABEL: merge_4_doubles:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups (%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_4_doubles:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups (%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %a = load double, ptr %ptr, align 8
  %vec = insertelement <4 x double> undef, double %a, i32 0
  %idx1 = getelementptr inbounds double, ptr %ptr, i64 1
  %b = load double, ptr %idx1, align 8
  %vec2 = insertelement <4 x double> %vec, double %b, i32 1
  %idx3 = getelementptr inbounds double, ptr %ptr, i64 2
  %c = load double, ptr %idx3, align 8
  %vec4 = insertelement <4 x double> %vec2, double %c, i32 2
  %idx5 = getelementptr inbounds double, ptr %ptr, i64 3
  %d = load double, ptr %idx5, align 8
  %vec6 = insertelement <4 x double> %vec4, double %d, i32 3
  ret <4 x double> %vec6
}

; PR21771 ( http://llvm.org/bugs/show_bug.cgi?id=21771 )
; Recognize and combine consecutive loads even when the
; first of the combined loads is offset from the base address.
define <4 x double> @merge_4_doubles_offset(ptr %ptr) {
; FAST32-LABEL: merge_4_doubles_offset:
; FAST32:       # %bb.0:
; FAST32-NEXT:    vmovups 32(%rdi), %ymm0
; FAST32-NEXT:    retq
;
; SLOW32-LABEL: merge_4_doubles_offset:
; SLOW32:       # %bb.0:
; SLOW32-NEXT:    vmovups 32(%rdi), %xmm0
; SLOW32-NEXT:    vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; SLOW32-NEXT:    retq
  %arrayidx4 = getelementptr inbounds double, ptr %ptr, i64 4
  %arrayidx5 = getelementptr inbounds double, ptr %ptr, i64 5
  %arrayidx6 = getelementptr inbounds double, ptr %ptr, i64 6
  %arrayidx7 = getelementptr inbounds double, ptr %ptr, i64 7
  %e = load double, ptr %arrayidx4, align 8
  %f = load double, ptr %arrayidx5, align 8
  %g = load double, ptr %arrayidx6, align 8
  %h = load double, ptr %arrayidx7, align 8
  %vecinit4 = insertelement <4 x double> undef, double %e, i32 0
  %vecinit5 = insertelement <4 x double> %vecinit4, double %f, i32 1
  %vecinit6 = insertelement <4 x double> %vecinit5, double %g, i32 2
  %vecinit7 = insertelement <4 x double> %vecinit6, double %h, i32 3
  ret <4 x double> %vecinit7
}
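
; The assertions in this file were generated by utils/update_llc_test_checks.py
; (see the NOTE at the top); rerun that script if the IR above changes.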