1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-NO-BMI2,X64-NO-SHLD,X64-NO-BMI2-NO-SHLD
3 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-NO-BMI2,X64-SHLD,X64-NO-BMI2-HAVE-SHLD
4 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-BMI2,X64-NO-SHLD,X64-HAVE-BMI2-NO-SHLD
5 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-BMI2,X64-SHLD,X64-HAVE-BMI2-HAVE-SHLD
6 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-NO-BMI2,X86-NO-SHLD,X86-NO-BMI2-NO-SHLD
7 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-NO-BMI2,X86-SHLD,X86-NO-BMI2-HAVE-SHLD
8 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,+bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-BMI2,X86-NO-SHLD,X86-HAVE-BMI2-NO-SHLD
9 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,+bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-BMI2,X86-SHLD,X86-HAVE-BMI2-HAVE-SHLD
; Build a 16-bit value whose upper byte is known zero (a 1-byte load inserted
; into <2 x i8> <poison, 0>), shift it right by a variable byte offset * 8,
; and store the low byte to %dst. The CHECK lines (autogenerated — do not
; hand-edit) verify this lowers to a zero-extending load plus a single
; variable shift (shrl / shrxl), with no wide-shift expansion.
11 define void @load_1byte_chunk_of_2byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
12 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca_with_zero_upper_half:
13 ; X64-NO-BMI2: # %bb.0:
14 ; X64-NO-BMI2-NEXT: movzbl (%rdi), %eax
15 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
16 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
17 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
18 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
19 ; X64-NO-BMI2-NEXT: retq
21 ; X64-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca_with_zero_upper_half:
23 ; X64-BMI2-NEXT: movzbl (%rdi), %eax
24 ; X64-BMI2-NEXT: shll $3, %esi
25 ; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
26 ; X64-BMI2-NEXT: movb %al, (%rdx)
29 ; X86-NO-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca_with_zero_upper_half:
30 ; X86-NO-BMI2: # %bb.0:
31 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
32 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
33 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
34 ; X86-NO-BMI2-NEXT: movzbl (%eax), %eax
35 ; X86-NO-BMI2-NEXT: shll $3, %ecx
36 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
37 ; X86-NO-BMI2-NEXT: shrl %cl, %eax
38 ; X86-NO-BMI2-NEXT: movb %al, (%edx)
39 ; X86-NO-BMI2-NEXT: retl
41 ; X86-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca_with_zero_upper_half:
43 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
44 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
45 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
46 ; X86-BMI2-NEXT: movzbl (%edx), %edx
47 ; X86-BMI2-NEXT: shll $3, %ecx
48 ; X86-BMI2-NEXT: shrxl %ecx, %edx, %ecx
49 ; X86-BMI2-NEXT: movb %cl, (%eax)
51 %init1 = load i8, ptr %src, align 1
52 %intermediate.sroa.0.0.vec.insert = insertelement <2 x i8> <i8 poison, i8 0>, i8 %init1, i64 0
53 %intermediate.val.frozen = freeze <2 x i8> %intermediate.sroa.0.0.vec.insert
54 %intermediate.val.frozen.bits = bitcast <2 x i8> %intermediate.val.frozen to i16
55 %byteOff.tr = trunc i64 %byteOff to i16
56 %byteOff.numbits.wide = shl i16 %byteOff.tr, 3
57 %intermediate.val.frozen.bits.positioned = lshr i16 %intermediate.val.frozen.bits, %byteOff.numbits.wide
58 %intermediate.val.frozen.bits.positioned.extracted = trunc i16 %intermediate.val.frozen.bits.positioned to i8
59 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
60 store <1 x i8> %1, ptr %dst, align 1
; Build a 32-bit value from a 2-byte load widened by shufflevector and
; blended with a zero upper half, shift it right by byteOff * 8, and store
; the low byte. The CHECK lines (autogenerated — do not hand-edit) verify
; the zero upper half lets this lower to movzwl plus one 32-bit variable
; shift (shrl / shrxl) instead of any vector or wide-shift sequence.
64 define void @load_1byte_chunk_of_4byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
65 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca_with_zero_upper_half:
66 ; X64-NO-BMI2: # %bb.0:
67 ; X64-NO-BMI2-NEXT: movzwl (%rdi), %eax
68 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
69 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
70 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
71 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
72 ; X64-NO-BMI2-NEXT: retq
74 ; X64-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca_with_zero_upper_half:
76 ; X64-BMI2-NEXT: movzwl (%rdi), %eax
77 ; X64-BMI2-NEXT: shll $3, %esi
78 ; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
79 ; X64-BMI2-NEXT: movb %al, (%rdx)
82 ; X86-NO-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca_with_zero_upper_half:
83 ; X86-NO-BMI2: # %bb.0:
84 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
85 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
86 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
87 ; X86-NO-BMI2-NEXT: movzwl (%eax), %eax
88 ; X86-NO-BMI2-NEXT: shll $3, %ecx
89 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
90 ; X86-NO-BMI2-NEXT: shrl %cl, %eax
91 ; X86-NO-BMI2-NEXT: movb %al, (%edx)
92 ; X86-NO-BMI2-NEXT: retl
94 ; X86-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca_with_zero_upper_half:
96 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
97 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
98 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
99 ; X86-BMI2-NEXT: movzwl (%edx), %edx
100 ; X86-BMI2-NEXT: shll $3, %ecx
101 ; X86-BMI2-NEXT: shrxl %ecx, %edx, %ecx
102 ; X86-BMI2-NEXT: movb %cl, (%eax)
104 %init = load <2 x i8>, ptr %src, align 1
105 %intermediate.sroa.0.0.vec.expand = shufflevector <2 x i8> %init, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
106 %intermediate.sroa.0.0.vecblend = shufflevector <4 x i8> %intermediate.sroa.0.0.vec.expand, <4 x i8> <i8 poison, i8 poison, i8 0, i8 0>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
107 %intermediate.val.frozen = freeze <4 x i8> %intermediate.sroa.0.0.vecblend
108 %intermediate.val.frozen.bits = bitcast <4 x i8> %intermediate.val.frozen to i32
109 %byteOff.tr = trunc i64 %byteOff to i32
110 %byteOff.numbits.wide = shl i32 %byteOff.tr, 3
111 %intermediate.val.frozen.bits.positioned = lshr i32 %intermediate.val.frozen.bits, %byteOff.numbits.wide
112 %intermediate.val.frozen.bits.positioned.extracted = trunc i32 %intermediate.val.frozen.bits.positioned to i8
113 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
114 store <1 x i8> %1, ptr %dst, align 1
; Same 32-bit value with a zero upper half as the previous test, but now a
; 2-byte chunk is extracted: shift right by byteOff * 8, truncate to i16,
; and store. The CHECK lines (autogenerated — do not hand-edit) verify the
; lowering is still movzwl plus a single 32-bit variable shift, ending in a
; 16-bit store (movw).
119 define void @load_2byte_chunk_of_4byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
120 ; X64-NO-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca_with_zero_upper_half:
121 ; X64-NO-BMI2: # %bb.0:
122 ; X64-NO-BMI2-NEXT: movzwl (%rdi), %eax
123 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
124 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
125 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
126 ; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
127 ; X64-NO-BMI2-NEXT: retq
129 ; X64-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca_with_zero_upper_half:
131 ; X64-BMI2-NEXT: movzwl (%rdi), %eax
132 ; X64-BMI2-NEXT: shll $3, %esi
133 ; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
134 ; X64-BMI2-NEXT: movw %ax, (%rdx)
135 ; X64-BMI2-NEXT: retq
137 ; X86-NO-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca_with_zero_upper_half:
138 ; X86-NO-BMI2: # %bb.0:
139 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
140 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
141 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
142 ; X86-NO-BMI2-NEXT: movzwl (%edx), %edx
143 ; X86-NO-BMI2-NEXT: shll $3, %ecx
144 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
145 ; X86-NO-BMI2-NEXT: shrl %cl, %edx
146 ; X86-NO-BMI2-NEXT: movw %dx, (%eax)
147 ; X86-NO-BMI2-NEXT: retl
149 ; X86-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca_with_zero_upper_half:
151 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
152 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
153 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
154 ; X86-BMI2-NEXT: movzwl (%edx), %edx
155 ; X86-BMI2-NEXT: shll $3, %ecx
156 ; X86-BMI2-NEXT: shrxl %ecx, %edx, %ecx
157 ; X86-BMI2-NEXT: movw %cx, (%eax)
158 ; X86-BMI2-NEXT: retl
159 %init = load <2 x i8>, ptr %src, align 1
160 %intermediate.sroa.0.0.vec.expand = shufflevector <2 x i8> %init, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
161 %intermediate.sroa.0.0.vecblend = shufflevector <4 x i8> %intermediate.sroa.0.0.vec.expand, <4 x i8> <i8 poison, i8 poison, i8 0, i8 0>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
162 %intermediate.val.frozen = freeze <4 x i8> %intermediate.sroa.0.0.vecblend
163 %intermediate.val.frozen.bits = bitcast <4 x i8> %intermediate.val.frozen to i32
164 %byteOff.tr = trunc i64 %byteOff to i32
165 %byteOff.numbits.wide = shl i32 %byteOff.tr, 3
166 %intermediate.val.frozen.bits.positioned = lshr i32 %intermediate.val.frozen.bits, %byteOff.numbits.wide
167 %intermediate.val.frozen.bits.positioned.extracted = trunc i32 %intermediate.val.frozen.bits.positioned to i16
168 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Build a 64-bit value from a 4-byte vector load blended with a zero upper
; half, shift it right by byteOff * 8 (i64 shift), and store the low byte.
; The CHECK lines (autogenerated — do not hand-edit) cover: x86-64 uses a
; single 64-bit shift (shrq / shrxq) on the movd/movq-extracted value, while
; 32-bit x86 must expand the 64-bit variable shift into a two-register
; shift-and-select sequence — shrdl + cmov when shld is fast, or the
; shift / not / lea / or / cmov pattern when shld is slow — in both BMI2
; and non-BMI2 flavors.
172 define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
173 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
174 ; X64-NO-BMI2: # %bb.0:
175 ; X64-NO-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
176 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
177 ; X64-NO-BMI2-NEXT: movq %xmm0, %rax
178 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
179 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
180 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
181 ; X64-NO-BMI2-NEXT: retq
183 ; X64-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
185 ; X64-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
186 ; X64-BMI2-NEXT: shll $3, %esi
187 ; X64-BMI2-NEXT: movq %xmm0, %rax
188 ; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
189 ; X64-BMI2-NEXT: movb %al, (%rdx)
190 ; X64-BMI2-NEXT: retq
192 ; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
193 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
194 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
195 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
196 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
197 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
198 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
199 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
200 ; X86-NO-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
201 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
202 ; X86-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
203 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm1, %ebx
204 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
205 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
206 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
207 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
208 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %edi
209 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
210 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
211 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
212 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
213 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
214 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %edi, %ebx
215 ; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, (%edx)
216 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
217 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
218 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
219 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
221 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
222 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
223 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
224 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
225 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
226 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
227 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
228 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
229 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
230 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
231 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
232 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
233 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx
234 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
235 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx
236 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movb %dl, (%eax)
237 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
238 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
240 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
241 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
242 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
243 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
244 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
245 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
246 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
247 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
248 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
249 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
250 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
251 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm1, %edx
252 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
253 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
254 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
255 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
256 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %edi
257 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
258 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
259 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
260 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
261 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
262 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
263 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
264 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
265 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
266 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
268 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
269 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
270 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
271 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
272 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
273 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
274 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
275 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
276 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
277 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
278 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
279 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
280 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
281 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
282 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
283 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %ebx
284 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movb %bl, (%eax)
285 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
286 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
287 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
288 %init = load <4 x i8>, ptr %src, align 1
289 %intermediate.sroa.0.0.vec.expand = shufflevector <4 x i8> %init, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
290 %intermediate.sroa.0.0.vecblend = shufflevector <8 x i8> %intermediate.sroa.0.0.vec.expand, <8 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
291 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
292 %intermediate.val.frozen = freeze <8 x i8> %intermediate.sroa.0.0.vecblend
293 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
294 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
295 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i8
296 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
297 store <1 x i8> %1, ptr %dst, align 1
; Same zero-upper-half 64-bit value as the previous test, but a 2-byte chunk
; is extracted: i64 lshr by byteOff * 8, truncate to i16, store with movw.
; The CHECK lines (autogenerated — do not hand-edit) again contrast the
; single 64-bit shift on x86-64 with the 32-bit expansion on x86
; (shrdl + cmov vs. the shift / not / lea / or / cmov slow-shld form).
301 define void @load_2byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
302 ; X64-NO-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
303 ; X64-NO-BMI2: # %bb.0:
304 ; X64-NO-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
305 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
306 ; X64-NO-BMI2-NEXT: movq %xmm0, %rax
307 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
308 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
309 ; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
310 ; X64-NO-BMI2-NEXT: retq
312 ; X64-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
314 ; X64-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
315 ; X64-BMI2-NEXT: shll $3, %esi
316 ; X64-BMI2-NEXT: movq %xmm0, %rax
317 ; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
318 ; X64-BMI2-NEXT: movw %ax, (%rdx)
319 ; X64-BMI2-NEXT: retq
321 ; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
322 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
323 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
324 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
325 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
326 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
327 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
328 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
329 ; X86-NO-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
330 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
331 ; X86-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
332 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm1, %esi
333 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi
334 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
335 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
336 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
337 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
338 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
339 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
340 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
341 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
342 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
343 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %ebx, %esi
344 ; X86-NO-BMI2-NO-SHLD-NEXT: movw %si, (%edx)
345 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
346 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
347 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
348 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
350 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
351 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
352 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
353 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
354 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
355 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
356 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
357 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
358 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
359 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
360 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
361 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
362 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
363 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
364 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
365 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax)
366 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
367 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
369 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
370 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
371 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
372 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
373 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
374 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
375 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
376 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
377 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
378 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
379 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
380 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm1, %edx
381 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
382 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
383 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
384 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
385 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %edi
386 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
387 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
388 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
389 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
390 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
391 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
392 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
393 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
394 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
395 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
397 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
398 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
399 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
400 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
401 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
402 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
403 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
404 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
405 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
406 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
407 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
408 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
409 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
410 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
411 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
412 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax)
413 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
414 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
415 %init = load <4 x i8>, ptr %src, align 1
416 %intermediate.sroa.0.0.vec.expand = shufflevector <4 x i8> %init, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
417 %intermediate.sroa.0.0.vecblend = shufflevector <8 x i8> %intermediate.sroa.0.0.vec.expand, <8 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
418 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
419 %intermediate.val.frozen = freeze <8 x i8> %intermediate.sroa.0.0.vecblend
420 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
421 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
422 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i16
423 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Same zero-upper-half 64-bit value, largest sub-width chunk: extract 4
; bytes via i64 lshr by byteOff * 8, truncate to i32, store with movl.
; The CHECK lines (autogenerated — do not hand-edit) mirror the 1- and
; 2-byte variants: one 64-bit shift on x86-64, and on 32-bit x86 the
; two-register 64-bit-shift expansion selected by the shld / BMI2
; feature combination.
427 define void @load_4byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
428 ; X64-NO-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
429 ; X64-NO-BMI2: # %bb.0:
430 ; X64-NO-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
431 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
432 ; X64-NO-BMI2-NEXT: movq %xmm0, %rax
433 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
434 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
435 ; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
436 ; X64-NO-BMI2-NEXT: retq
438 ; X64-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
440 ; X64-BMI2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
441 ; X64-BMI2-NEXT: shll $3, %esi
442 ; X64-BMI2-NEXT: movq %xmm0, %rax
443 ; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
444 ; X64-BMI2-NEXT: movl %eax, (%rdx)
445 ; X64-BMI2-NEXT: retq
447 ; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
448 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
449 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
450 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
451 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
452 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
453 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
454 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
455 ; X86-NO-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
456 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
457 ; X86-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
458 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm1, %esi
459 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi
460 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
461 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
462 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
463 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
464 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
465 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
466 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
467 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
468 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
469 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %ebx, %esi
470 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%edx)
471 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
472 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
473 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
474 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
476 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
477 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
478 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
479 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
480 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
481 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
482 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
483 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
484 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
485 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
486 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
487 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
488 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
489 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
490 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
491 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax)
492 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
493 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
495 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
496 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
497 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
498 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
499 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
500 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
501 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
502 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
503 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
504 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
505 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
506 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm1, %edx
507 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
508 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
509 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
510 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
511 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %edi
512 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
513 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
514 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
515 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
516 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
517 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
518 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
519 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
520 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
521 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
523 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
524 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
525 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
526 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
527 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
528 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
529 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
530 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
531 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
532 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
533 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
534 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
535 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
536 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
537 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
538 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax)
539 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
540 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
541 %init = load <4 x i8>, ptr %src, align 1
542 %intermediate.sroa.0.0.vec.expand = shufflevector <4 x i8> %init, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
543 %intermediate.sroa.0.0.vecblend = shufflevector <8 x i8> %intermediate.sroa.0.0.vec.expand, <8 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
544 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
545 %intermediate.val.frozen = freeze <8 x i8> %intermediate.sroa.0.0.vecblend
546 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
547 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
548 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i32
549 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
552 define void @load_1byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
553 ; X64-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
554 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
555 ; X64-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
556 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
557 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
558 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
559 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
560 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
561 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
562 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
563 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
564 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
565 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
566 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
567 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
568 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
569 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
570 ; X64-NO-BMI2-NO-SHLD-NEXT: movb %al, (%rdx)
571 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
573 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
574 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
575 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
576 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
577 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
578 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
579 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
580 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
581 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
582 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
583 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
584 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
585 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movb %sil, (%rdx)
586 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
588 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
589 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
590 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
591 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
592 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
593 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
594 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
595 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
596 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
597 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
598 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
599 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
600 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
601 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
602 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
603 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
604 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movb %al, (%rdx)
605 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
607 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
608 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
609 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
610 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
611 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
612 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
613 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
614 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
615 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
616 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
617 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
618 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
619 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movb %sil, (%rdx)
620 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
622 ; X86-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
624 ; X86-NEXT: subl $32, %esp
625 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
626 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
627 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
628 ; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
629 ; X86-NEXT: shll $3, %ecx
630 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
631 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
632 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
633 ; X86-NEXT: movd %xmm0, (%esp)
634 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
635 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
636 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
637 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
638 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
639 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
640 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
641 ; X86-NEXT: shrb $3, %cl
642 ; X86-NEXT: andb $15, %cl
643 ; X86-NEXT: movzbl %cl, %ecx
644 ; X86-NEXT: movzbl (%esp,%ecx), %ecx
645 ; X86-NEXT: movb %cl, (%eax)
646 ; X86-NEXT: addl $32, %esp
648 %init = load <8 x i8>, ptr %src, align 1
649 %intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
650 %intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
651 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
652 %intermediate.val.frozen = freeze <16 x i8> %intermediate.sroa.0.0.vecblend
653 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
654 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
655 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
656 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i8
657 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
658 store <1 x i8> %1, ptr %dst, align 1
; Test: build a 16-byte value from an 8-byte load of %src with the upper
; 8 bytes zeroed, shift it right by %byteOff*8 bits, and store the low
; 2 bytes to %dst. The CHECK lines below are autogenerated by
; utils/update_llc_test_checks.py — regenerate them, do not edit by hand.
662 define void @load_2byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
663 ; X64-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
664 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
665 ; X64-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
666 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
667 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
668 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
669 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
670 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
671 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
672 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
673 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
674 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
675 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
676 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
677 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
678 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
679 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
680 ; X64-NO-BMI2-NO-SHLD-NEXT: movw %ax, (%rdx)
681 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
683 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
684 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
685 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
686 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
687 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
688 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
689 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
690 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
691 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
692 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
693 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
694 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
695 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movw %si, (%rdx)
696 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
698 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
699 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
700 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
701 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
702 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
703 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
704 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
705 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
706 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
707 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
708 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
709 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
710 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
711 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
712 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
713 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
714 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movw %ax, (%rdx)
715 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
717 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
718 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
719 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
720 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
721 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
722 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
723 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
724 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
725 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
726 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
727 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
728 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
729 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%rdx)
730 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
732 ; X86-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
734 ; X86-NEXT: subl $32, %esp
735 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
736 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
737 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
738 ; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
739 ; X86-NEXT: shll $3, %ecx
740 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
741 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
742 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
743 ; X86-NEXT: movd %xmm0, (%esp)
744 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
745 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
746 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
747 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
748 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
749 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
750 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
751 ; X86-NEXT: shrb $3, %cl
752 ; X86-NEXT: andb $15, %cl
753 ; X86-NEXT: movzbl %cl, %ecx
754 ; X86-NEXT: movl (%esp,%ecx), %ecx
755 ; X86-NEXT: movw %cx, (%eax)
756 ; X86-NEXT: addl $32, %esp
; IR under test: widen the 8-byte load to 16 bytes (upper 8 lanes zeroed via
; the two shufflevectors), freeze, bitcast to i128, lshr by %byteOff*8, then
; truncate to i16 and store.
758 %init = load <8 x i8>, ptr %src, align 1
759 %intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
760 %intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
761 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
762 %intermediate.val.frozen = freeze <16 x i8> %intermediate.sroa.0.0.vecblend
763 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
764 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
765 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
766 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i16
767 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Test: same 16-byte zero-upper-half pattern as above, but extracting a
; 4-byte (i32) chunk at byte offset %byteOff and storing it to %dst.
; CHECK lines are autogenerated by utils/update_llc_test_checks.py —
; regenerate them, do not edit by hand.
771 define void @load_4byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
772 ; X64-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
773 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
774 ; X64-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
775 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
776 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
777 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
778 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
779 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
780 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
781 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
782 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
783 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
784 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
785 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
786 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
787 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
788 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
789 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%rdx)
790 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
792 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
793 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
794 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
795 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
796 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
797 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
798 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
799 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
800 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
801 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
802 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
803 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
804 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%rdx)
805 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
807 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
808 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
809 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
810 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
811 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
812 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
813 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
814 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
815 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
816 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
817 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
818 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
819 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
820 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
821 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
822 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
823 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%rdx)
824 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
826 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
827 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
828 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
829 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
830 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
831 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
832 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
833 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
834 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
835 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
836 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
837 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
838 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%rdx)
839 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
841 ; X86-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
843 ; X86-NEXT: subl $32, %esp
844 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
845 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
846 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
847 ; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
848 ; X86-NEXT: shll $3, %ecx
849 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
850 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
851 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
852 ; X86-NEXT: movd %xmm0, (%esp)
853 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
854 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
855 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
856 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
857 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
858 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
859 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
860 ; X86-NEXT: shrb $3, %cl
861 ; X86-NEXT: andb $15, %cl
862 ; X86-NEXT: movzbl %cl, %ecx
863 ; X86-NEXT: movl (%esp,%ecx), %ecx
864 ; X86-NEXT: movl %ecx, (%eax)
865 ; X86-NEXT: addl $32, %esp
; IR under test: zero-extend the 8-byte load to an i128 (upper lanes zeroed
; via shufflevector), lshr by %byteOff*8, truncate to i32, store.
867 %init = load <8 x i8>, ptr %src, align 1
868 %intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
869 %intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
870 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
871 %intermediate.val.frozen = freeze <16 x i8> %intermediate.sroa.0.0.vecblend
872 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
873 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
874 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
875 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i32
876 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
; Test: same 16-byte zero-upper-half pattern, extracting an 8-byte (i64)
; chunk at byte offset %byteOff and storing it to %dst (two 4-byte loads
; and stores on X86). CHECK lines are autogenerated by
; utils/update_llc_test_checks.py — regenerate them, do not edit by hand.
880 define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
881 ; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
882 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
883 ; X64-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
884 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
885 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
886 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
887 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
888 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
889 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
890 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
891 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
892 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
893 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
894 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
895 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
896 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
897 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
898 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
899 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
901 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
902 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
903 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
904 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
905 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
906 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
907 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
908 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
909 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
910 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
911 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
912 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
913 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
914 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
916 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
917 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
918 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
919 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
920 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
921 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
922 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
923 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
924 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
925 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
926 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
927 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
928 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
929 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
930 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
931 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
932 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
933 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
935 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
936 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
937 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
938 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
939 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
940 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
941 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
942 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
943 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
944 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
945 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
946 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
947 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
948 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
950 ; X86-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
952 ; X86-NEXT: subl $32, %esp
953 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
954 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
955 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
956 ; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
957 ; X86-NEXT: shll $3, %ecx
958 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
959 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
960 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
961 ; X86-NEXT: movd %xmm0, (%esp)
962 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
963 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
964 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
965 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
966 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
967 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
968 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
969 ; X86-NEXT: shrb $3, %cl
970 ; X86-NEXT: andb $15, %cl
971 ; X86-NEXT: movzbl %cl, %ecx
972 ; X86-NEXT: movl (%esp,%ecx), %edx
973 ; X86-NEXT: movl 4(%esp,%ecx), %ecx
974 ; X86-NEXT: movl %ecx, 4(%eax)
975 ; X86-NEXT: movl %edx, (%eax)
976 ; X86-NEXT: addl $32, %esp
; IR under test: widen the 8-byte load to i128 with zeroed upper half,
; lshr by %byteOff*8, truncate to i64, store.
978 %init = load <8 x i8>, ptr %src, align 1
979 %intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
980 %intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
981 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
982 %intermediate.val.frozen = freeze <16 x i8> %intermediate.sroa.0.0.vecblend
983 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
984 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
985 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
986 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i64
987 store i64 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 8
; Test: 32-byte variant — a 16-byte load of %src widened to 32 bytes with
; the upper 16 bytes zeroed, shifted right by %byteOff*8 bits, with the low
; byte stored to %dst. The i256 shift is lowered via a stack spill plus a
; byte-indexed reload rather than a funnel-shift chain, so all four X64 run
; lines share a single X64 check prefix here. CHECK lines are autogenerated
; by utils/update_llc_test_checks.py — regenerate, do not edit by hand.
991 define void @load_1byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
992 ; X64-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
994 ; X64-NEXT: movdqu (%rdi), %xmm0
995 ; X64-NEXT: shll $3, %esi
996 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
997 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
998 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
999 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1000 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1001 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1002 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1003 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1004 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1005 ; X64-NEXT: shrb $3, %sil
1006 ; X64-NEXT: movzbl %sil, %eax
1007 ; X64-NEXT: movzbl -64(%rsp,%rax), %eax
1008 ; X64-NEXT: movb %al, (%rdx)
1011 ; X86-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
1013 ; X86-NEXT: subl $64, %esp
1014 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1015 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1016 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1017 ; X86-NEXT: movdqu (%edx), %xmm0
1018 ; X86-NEXT: shll $3, %ecx
1019 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
1020 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1021 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
1022 ; X86-NEXT: movd %xmm0, (%esp)
1023 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1024 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1025 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1026 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1027 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1028 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1029 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1030 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1031 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1032 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1033 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1034 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1035 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1036 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1037 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1038 ; X86-NEXT: shrb $3, %cl
1039 ; X86-NEXT: movzbl %cl, %ecx
1040 ; X86-NEXT: movzbl (%esp,%ecx), %ecx
1041 ; X86-NEXT: movb %cl, (%eax)
1042 ; X86-NEXT: addl $64, %esp
; IR under test: widen the 16-byte load to i256 with zeroed upper half,
; lshr by %byteOff*8, truncate to i8, store via a <1 x i8> insert.
1044 %init = load <16 x i8>, ptr %src, align 1
1045 %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1046 %intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1047 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1048 %intermediate.val.frozen = freeze <32 x i8> %intermediate.sroa.0.0.vecblend
1049 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1050 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1051 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1052 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i8
1053 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
1054 store <1 x i8> %1, ptr %dst, align 1
; Test: same 32-byte zero-upper-half pattern, extracting a 2-byte (i16)
; chunk at byte offset %byteOff via the stack-spill-and-reload lowering.
; CHECK lines are autogenerated by utils/update_llc_test_checks.py —
; regenerate them, do not edit by hand.
1058 define void @load_2byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1059 ; X64-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
1061 ; X64-NEXT: movdqu (%rdi), %xmm0
1062 ; X64-NEXT: shll $3, %esi
1063 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
1064 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1065 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1066 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1067 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1068 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1069 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1070 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1071 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1072 ; X64-NEXT: shrb $3, %sil
1073 ; X64-NEXT: movzbl %sil, %eax
1074 ; X64-NEXT: movq -64(%rsp,%rax), %rax
1075 ; X64-NEXT: movw %ax, (%rdx)
1078 ; X86-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
1080 ; X86-NEXT: subl $64, %esp
1081 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1082 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1083 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1084 ; X86-NEXT: movdqu (%edx), %xmm0
1085 ; X86-NEXT: shll $3, %ecx
1086 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
1087 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1088 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
1089 ; X86-NEXT: movd %xmm0, (%esp)
1090 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1091 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1092 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1093 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1094 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1095 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1096 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1097 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1098 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1099 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1100 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1101 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1102 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1103 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1104 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1105 ; X86-NEXT: shrb $3, %cl
1106 ; X86-NEXT: movzbl %cl, %ecx
1107 ; X86-NEXT: movl (%esp,%ecx), %ecx
1108 ; X86-NEXT: movw %cx, (%eax)
1109 ; X86-NEXT: addl $64, %esp
; IR under test: widen the 16-byte load to i256 with zeroed upper half,
; lshr by %byteOff*8, truncate to i16, store.
1111 %init = load <16 x i8>, ptr %src, align 1
1112 %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1113 %intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1114 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1115 %intermediate.val.frozen = freeze <32 x i8> %intermediate.sroa.0.0.vecblend
1116 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1117 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1118 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1119 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i16
1120 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Test: same 32-byte zero-upper-half pattern, extracting a 4-byte (i32)
; chunk at byte offset %byteOff via the stack-spill-and-reload lowering.
; CHECK lines are autogenerated by utils/update_llc_test_checks.py —
; regenerate them, do not edit by hand.
1124 define void @load_4byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1125 ; X64-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
1127 ; X64-NEXT: movdqu (%rdi), %xmm0
1128 ; X64-NEXT: shll $3, %esi
1129 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
1130 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1131 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1132 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1133 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1134 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1135 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1136 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1137 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1138 ; X64-NEXT: shrb $3, %sil
1139 ; X64-NEXT: movzbl %sil, %eax
1140 ; X64-NEXT: movl -64(%rsp,%rax), %eax
1141 ; X64-NEXT: movl %eax, (%rdx)
1144 ; X86-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
1146 ; X86-NEXT: subl $64, %esp
1147 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1148 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1149 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1150 ; X86-NEXT: movdqu (%edx), %xmm0
1151 ; X86-NEXT: shll $3, %ecx
1152 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
1153 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1154 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
1155 ; X86-NEXT: movd %xmm0, (%esp)
1156 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1157 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1158 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1159 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1160 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1161 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1162 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1163 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1164 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1165 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1166 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1167 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1168 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1169 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1170 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1171 ; X86-NEXT: shrb $3, %cl
1172 ; X86-NEXT: movzbl %cl, %ecx
1173 ; X86-NEXT: movl (%esp,%ecx), %ecx
1174 ; X86-NEXT: movl %ecx, (%eax)
1175 ; X86-NEXT: addl $64, %esp
; IR under test: widen the 16-byte load to i256 with zeroed upper half,
; lshr by %byteOff*8, truncate to i32, store.
1177 %init = load <16 x i8>, ptr %src, align 1
1178 %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1179 %intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1180 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1181 %intermediate.val.frozen = freeze <32 x i8> %intermediate.sroa.0.0.vecblend
1182 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1183 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1184 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1185 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i32
1186 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
1190 define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1191 ; X64-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
1193 ; X64-NEXT: movdqu (%rdi), %xmm0
1194 ; X64-NEXT: shll $3, %esi
1195 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
1196 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1197 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1198 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1199 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1200 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1201 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1202 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1203 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1204 ; X64-NEXT: shrb $3, %sil
1205 ; X64-NEXT: movzbl %sil, %eax
1206 ; X64-NEXT: movq -64(%rsp,%rax), %rax
1207 ; X64-NEXT: movq %rax, (%rdx)
1210 ; X86-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
1212 ; X86-NEXT: subl $64, %esp
1213 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1214 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1215 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1216 ; X86-NEXT: movdqu (%edx), %xmm0
1217 ; X86-NEXT: shll $3, %ecx
1218 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
1219 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1220 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
1221 ; X86-NEXT: movd %xmm0, (%esp)
1222 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1223 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1224 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1225 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1226 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1227 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1228 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1229 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1230 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1231 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1232 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1233 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1234 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1235 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1236 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1237 ; X86-NEXT: shrb $3, %cl
1238 ; X86-NEXT: movzbl %cl, %ecx
1239 ; X86-NEXT: movl (%esp,%ecx), %edx
1240 ; X86-NEXT: movl 4(%esp,%ecx), %ecx
1241 ; X86-NEXT: movl %ecx, 4(%eax)
1242 ; X86-NEXT: movl %edx, (%eax)
1243 ; X86-NEXT: addl $64, %esp
1245 %init = load <16 x i8>, ptr %src, align 1
1246 %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1247 %intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1248 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1249 %intermediate.val.frozen = freeze <32 x i8> %intermediate.sroa.0.0.vecblend
1250 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1251 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1252 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1253 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i64
1254 store i64 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 8
1258 define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1259 ; X64-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
1261 ; X64-NEXT: movdqu (%rdi), %xmm0
1262 ; X64-NEXT: shll $3, %esi
1263 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
1264 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1265 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1266 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1267 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1268 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1269 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1270 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1271 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1272 ; X64-NEXT: shrb $3, %sil
1273 ; X64-NEXT: movzbl %sil, %eax
1274 ; X64-NEXT: movq -64(%rsp,%rax), %rcx
1275 ; X64-NEXT: movq -56(%rsp,%rax), %rax
1276 ; X64-NEXT: movq %rax, 8(%rdx)
1277 ; X64-NEXT: movq %rcx, (%rdx)
1280 ; X86-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
1282 ; X86-NEXT: pushl %edi
1283 ; X86-NEXT: pushl %esi
1284 ; X86-NEXT: subl $64, %esp
1285 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1286 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1287 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1288 ; X86-NEXT: movdqu (%edx), %xmm0
1289 ; X86-NEXT: shll $3, %ecx
1290 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
1291 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1292 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
1293 ; X86-NEXT: movd %xmm0, (%esp)
1294 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1295 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1296 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1297 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1298 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1299 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1300 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1301 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1302 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1303 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1304 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1305 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1306 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1307 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1308 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1309 ; X86-NEXT: shrb $3, %cl
1310 ; X86-NEXT: movzbl %cl, %ecx
1311 ; X86-NEXT: movl (%esp,%ecx), %edx
1312 ; X86-NEXT: movl 4(%esp,%ecx), %esi
1313 ; X86-NEXT: movl 8(%esp,%ecx), %edi
1314 ; X86-NEXT: movl 12(%esp,%ecx), %ecx
1315 ; X86-NEXT: movl %ecx, 12(%eax)
1316 ; X86-NEXT: movl %edi, 8(%eax)
1317 ; X86-NEXT: movl %esi, 4(%eax)
1318 ; X86-NEXT: movl %edx, (%eax)
1319 ; X86-NEXT: addl $64, %esp
1320 ; X86-NEXT: popl %esi
1321 ; X86-NEXT: popl %edi
1323 %init = load <16 x i8>, ptr %src, align 1
1324 %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1325 %intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1326 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1327 %intermediate.val.frozen = freeze <32 x i8> %intermediate.sroa.0.0.vecblend
1328 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1329 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1330 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1331 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i128
1332 store i128 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 16
1336 define void @load_1byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1337 ; X64-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
1339 ; X64-NEXT: movdqu (%rdi), %xmm0
1340 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1341 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1342 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1343 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1344 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1345 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1346 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1347 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1348 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1349 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1350 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1351 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1352 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1353 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1354 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1355 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1356 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1357 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1358 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1359 ; X64-NEXT: andl $63, %esi
1360 ; X64-NEXT: movzbl -128(%rsp,%rsi), %eax
1361 ; X64-NEXT: movb %al, (%rdx)
1364 ; X86-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
1366 ; X86-NEXT: subl $128, %esp
1367 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1368 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1369 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1370 ; X86-NEXT: movdqu (%edx), %xmm0
1371 ; X86-NEXT: movdqu 16(%edx), %xmm1
1372 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1373 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1374 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1375 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1376 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1377 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1378 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1379 ; X86-NEXT: movd %xmm0, (%esp)
1380 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1381 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1382 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1383 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1384 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1385 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1386 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1387 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1388 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1389 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1390 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1391 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1392 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1393 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1394 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1395 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1396 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1397 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1398 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1399 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1400 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1401 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1402 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1403 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1404 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1405 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1406 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1407 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1408 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1409 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1410 ; X86-NEXT: andl $63, %ecx
1411 ; X86-NEXT: movzbl (%esp,%ecx), %ecx
1412 ; X86-NEXT: movb %cl, (%eax)
1413 ; X86-NEXT: addl $128, %esp
1415 %init = load <32 x i8>, ptr %src, align 1
1416 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1417 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
1418 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1419 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1420 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1421 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1422 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1423 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i8
1424 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
1425 store <1 x i8> %1, ptr %dst, align 1
1429 define void @load_2byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1430 ; X64-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
1432 ; X64-NEXT: movdqu (%rdi), %xmm0
1433 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1434 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1435 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1436 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1437 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1438 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1439 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1440 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1441 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1442 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1443 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1444 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1445 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1446 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1447 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1448 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1449 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1450 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1451 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1452 ; X64-NEXT: andl $63, %esi
1453 ; X64-NEXT: movq -128(%rsp,%rsi), %rax
1454 ; X64-NEXT: movw %ax, (%rdx)
1457 ; X86-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
1459 ; X86-NEXT: subl $128, %esp
1460 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1461 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1462 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1463 ; X86-NEXT: movdqu (%edx), %xmm0
1464 ; X86-NEXT: movdqu 16(%edx), %xmm1
1465 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1466 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1467 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1468 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1469 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1470 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1471 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1472 ; X86-NEXT: movd %xmm0, (%esp)
1473 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1474 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1475 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1476 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1477 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1478 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1479 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1480 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1481 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1482 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1483 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1484 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1485 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1486 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1487 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1488 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1489 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1490 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1491 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1492 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1493 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1494 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1495 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1496 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1497 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1498 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1499 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1500 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1501 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1502 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1503 ; X86-NEXT: andl $63, %ecx
1504 ; X86-NEXT: movl (%esp,%ecx), %ecx
1505 ; X86-NEXT: movw %cx, (%eax)
1506 ; X86-NEXT: addl $128, %esp
1508 %init = load <32 x i8>, ptr %src, align 1
1509 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1510 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
1511 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1512 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1513 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1514 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1515 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1516 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i16
1517 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
1521 define void @load_4byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1522 ; X64-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
1524 ; X64-NEXT: movdqu (%rdi), %xmm0
1525 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1526 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1527 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1528 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1529 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1530 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1531 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1532 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1533 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1534 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1535 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1536 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1537 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1538 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1539 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1540 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1541 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1542 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1543 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1544 ; X64-NEXT: andl $63, %esi
1545 ; X64-NEXT: movl -128(%rsp,%rsi), %eax
1546 ; X64-NEXT: movl %eax, (%rdx)
1549 ; X86-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
1551 ; X86-NEXT: subl $128, %esp
1552 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1553 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1554 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1555 ; X86-NEXT: movdqu (%edx), %xmm0
1556 ; X86-NEXT: movdqu 16(%edx), %xmm1
1557 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1558 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1559 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1560 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1561 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1562 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1563 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1564 ; X86-NEXT: movd %xmm0, (%esp)
1565 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1566 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1567 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1568 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1569 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1570 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1571 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1572 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1573 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1574 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1575 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1576 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1577 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1578 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1579 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1580 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1581 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1582 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1583 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1584 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1585 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1586 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1587 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1588 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1589 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1590 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1591 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1592 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1593 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1594 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1595 ; X86-NEXT: andl $63, %ecx
1596 ; X86-NEXT: movl (%esp,%ecx), %ecx
1597 ; X86-NEXT: movl %ecx, (%eax)
1598 ; X86-NEXT: addl $128, %esp
1600 %init = load <32 x i8>, ptr %src, align 1
1601 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1602 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
1603 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1604 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1605 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1606 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1607 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1608 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i32
1609 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
1613 define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1614 ; X64-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
1616 ; X64-NEXT: movdqu (%rdi), %xmm0
1617 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1618 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1619 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1620 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1621 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1622 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1623 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1624 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1625 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1626 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1627 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1628 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1629 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1630 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1631 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1632 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1633 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1634 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1635 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1636 ; X64-NEXT: andl $63, %esi
1637 ; X64-NEXT: movq -128(%rsp,%rsi), %rax
1638 ; X64-NEXT: movq %rax, (%rdx)
1641 ; X86-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
1643 ; X86-NEXT: subl $128, %esp
1644 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1645 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1646 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1647 ; X86-NEXT: movdqu (%edx), %xmm0
1648 ; X86-NEXT: movdqu 16(%edx), %xmm1
1649 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1650 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1651 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1652 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1653 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1654 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1655 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1656 ; X86-NEXT: movd %xmm0, (%esp)
1657 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1658 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1659 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1660 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1661 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1662 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1663 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1664 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1665 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1666 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1667 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1668 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1669 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1670 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1671 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1672 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1673 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1674 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1675 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1676 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1677 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1678 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1679 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1680 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1681 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1682 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1683 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1684 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1685 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1686 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1687 ; X86-NEXT: andl $63, %ecx
1688 ; X86-NEXT: movl (%esp,%ecx), %edx
1689 ; X86-NEXT: movl 4(%esp,%ecx), %ecx
1690 ; X86-NEXT: movl %ecx, 4(%eax)
1691 ; X86-NEXT: movl %edx, (%eax)
1692 ; X86-NEXT: addl $128, %esp
1694 %init = load <32 x i8>, ptr %src, align 1
1695 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1696 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
1697 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1698 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1699 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1700 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1701 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1702 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i64
1703 store i64 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 8
; Test: conceptually widen a 32-byte load to a 64-byte value whose upper half
; is known-zero, right-shift it by (%byteOff * 8) bits, and store the low
; 16 bytes to %dst. Codegen is expected to lower this to a stack spill of the
; 64 bytes (upper half stored as zeros) plus a variable-offset 16-byte reload.
; NOTE(review): the CHECK lines below are autogenerated (see file header);
; regenerate with utils/update_llc_test_checks.py rather than hand-editing.
1707 define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1708 ; X64-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
1710 ; X64-NEXT: movdqu (%rdi), %xmm0
1711 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1712 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1713 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1714 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1715 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1716 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1717 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1718 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1719 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1720 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1721 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1722 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1723 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1724 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1725 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1726 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1727 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1728 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1729 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1730 ; X64-NEXT: andl $63, %esi
1731 ; X64-NEXT: movq -128(%rsp,%rsi), %rax
1732 ; X64-NEXT: movq -120(%rsp,%rsi), %rcx
1733 ; X64-NEXT: movq %rcx, 8(%rdx)
1734 ; X64-NEXT: movq %rax, (%rdx)
1737 ; X86-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
1739 ; X86-NEXT: pushl %edi
1740 ; X86-NEXT: pushl %esi
1741 ; X86-NEXT: subl $128, %esp
1742 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1743 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1744 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1745 ; X86-NEXT: movdqu (%edx), %xmm0
1746 ; X86-NEXT: movdqu 16(%edx), %xmm1
1747 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1748 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1749 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1750 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1751 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1752 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1753 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1754 ; X86-NEXT: movd %xmm0, (%esp)
1755 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1756 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1757 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1758 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1759 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1760 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1761 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1762 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1763 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1764 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1765 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1766 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1767 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1768 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1769 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1770 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1771 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1772 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1773 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1774 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1775 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1776 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1777 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1778 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1779 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1780 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1781 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1782 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1783 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1784 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1785 ; X86-NEXT: andl $63, %ecx
1786 ; X86-NEXT: movl (%esp,%ecx), %edx
1787 ; X86-NEXT: movl 4(%esp,%ecx), %esi
1788 ; X86-NEXT: movl 8(%esp,%ecx), %edi
1789 ; X86-NEXT: movl 12(%esp,%ecx), %ecx
1790 ; X86-NEXT: movl %ecx, 12(%eax)
1791 ; X86-NEXT: movl %edi, 8(%eax)
1792 ; X86-NEXT: movl %esi, 4(%eax)
1793 ; X86-NEXT: movl %edx, (%eax)
1794 ; X86-NEXT: addl $128, %esp
1795 ; X86-NEXT: popl %esi
1796 ; X86-NEXT: popl %edi
; IR under test: build a 64-byte intermediate where bytes 0-31 come from the
; 32-byte load of %src and bytes 32-63 are explicitly zero (the "zero upper
; half" of the test name).
1798 %init = load <32 x i8>, ptr %src, align 1
1799 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1800 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
; Convert the byte offset to a bit offset (<< 3), view the 64 bytes as i512,
; shift right, and keep the low 128 bits — i.e. the 16-byte chunk at %byteOff.
1801 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1802 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1803 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1804 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1805 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1806 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i128
1807 store i128 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 16
; Test: same pattern as the 16-byte variant above, but extracting a 32-byte
; chunk — widen a 32-byte load to a 64-byte value with a known-zero upper
; half, right-shift by (%byteOff * 8) bits, and store the low 32 bytes to
; %dst. Expected lowering: spill the 64 bytes to the stack (zeros for the
; upper half) and do a variable-offset 32-byte reload.
; NOTE(review): the CHECK lines below are autogenerated (see file header);
; regenerate with utils/update_llc_test_checks.py rather than hand-editing.
1811 define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1812 ; X64-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
1814 ; X64-NEXT: movdqu (%rdi), %xmm0
1815 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1816 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1817 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1818 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1819 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1820 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1821 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1822 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1823 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1824 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1825 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1826 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1827 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1828 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1829 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1830 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1831 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1832 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1833 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1834 ; X64-NEXT: andl $63, %esi
1835 ; X64-NEXT: movq -128(%rsp,%rsi), %rax
1836 ; X64-NEXT: movq -120(%rsp,%rsi), %rcx
1837 ; X64-NEXT: movq -112(%rsp,%rsi), %rdi
1838 ; X64-NEXT: movq -104(%rsp,%rsi), %rsi
1839 ; X64-NEXT: movq %rsi, 24(%rdx)
1840 ; X64-NEXT: movq %rdi, 16(%rdx)
1841 ; X64-NEXT: movq %rcx, 8(%rdx)
1842 ; X64-NEXT: movq %rax, (%rdx)
1845 ; X86-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
1847 ; X86-NEXT: pushl %ebp
1848 ; X86-NEXT: pushl %ebx
1849 ; X86-NEXT: pushl %edi
1850 ; X86-NEXT: pushl %esi
1851 ; X86-NEXT: subl $136, %esp
1852 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1853 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1854 ; X86-NEXT: movdqu (%ecx), %xmm0
1855 ; X86-NEXT: movdqu 16(%ecx), %xmm1
1856 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1857 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1858 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1859 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1860 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1861 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1862 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1863 ; X86-NEXT: movd %xmm0, {{[0-9]+}}(%esp)
1864 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1865 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1866 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1867 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1868 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1869 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1870 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1871 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1872 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1873 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1874 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1875 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1876 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1877 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1878 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1879 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1880 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1881 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1882 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1883 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1884 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1885 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1886 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1887 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1888 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1889 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1890 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1891 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1892 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1893 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1894 ; X86-NEXT: andl $63, %eax
1895 ; X86-NEXT: movl 8(%esp,%eax), %ecx
1896 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1897 ; X86-NEXT: movl 12(%esp,%eax), %ecx
1898 ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
1899 ; X86-NEXT: movl 16(%esp,%eax), %esi
1900 ; X86-NEXT: movl 20(%esp,%eax), %edi
1901 ; X86-NEXT: movl 24(%esp,%eax), %ebx
1902 ; X86-NEXT: movl 28(%esp,%eax), %ebp
1903 ; X86-NEXT: movl 32(%esp,%eax), %edx
1904 ; X86-NEXT: movl 36(%esp,%eax), %ecx
1905 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1906 ; X86-NEXT: movl %ecx, 28(%eax)
1907 ; X86-NEXT: movl %edx, 24(%eax)
1908 ; X86-NEXT: movl %ebp, 20(%eax)
1909 ; X86-NEXT: movl %ebx, 16(%eax)
1910 ; X86-NEXT: movl %edi, 12(%eax)
1911 ; X86-NEXT: movl %esi, 8(%eax)
1912 ; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
1913 ; X86-NEXT: movl %ecx, 4(%eax)
1914 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1915 ; X86-NEXT: movl %ecx, (%eax)
1916 ; X86-NEXT: addl $136, %esp
1917 ; X86-NEXT: popl %esi
1918 ; X86-NEXT: popl %edi
1919 ; X86-NEXT: popl %ebx
1920 ; X86-NEXT: popl %ebp
; IR under test: build a 64-byte intermediate where bytes 0-31 come from the
; 32-byte load of %src and bytes 32-63 are explicitly zero (the "zero upper
; half" of the test name).
1922 %init = load <32 x i8>, ptr %src, align 1
1923 %intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1924 %intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
; Convert the byte offset to a bit offset (<< 3), view the 64 bytes as i512,
; shift right, and keep the low 256 bits — i.e. the 32-byte chunk at %byteOff.
1925 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1926 %intermediate.val.frozen = freeze <64 x i8> %intermediate.sroa.0.0.vecblend
1927 %intermediate.val.frozen.bits = bitcast <64 x i8> %intermediate.val.frozen to i512
1928 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i512
1929 %intermediate.val.frozen.bits.positioned = lshr i512 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1930 %intermediate.val.frozen.bits.positioned.extracted = trunc i512 %intermediate.val.frozen.bits.positioned to i256
1931 store i256 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 32
1934 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
1936 ; X64-NO-SHLD: {{.*}}
1938 ; X86-NO-SHLD: {{.*}}