1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-NO-BMI2,X64-NO-SHLD,X64-NO-BMI2-NO-SHLD
3 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-NO-BMI2,X64-SHLD,X64-NO-BMI2-HAVE-SHLD
4 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-BMI2,X64-NO-SHLD,X64-HAVE-BMI2-NO-SHLD
5 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X64,X64-BMI2,X64-SHLD,X64-HAVE-BMI2-HAVE-SHLD
6 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-NO-BMI2,X86-NO-SHLD,X86-NO-BMI2-NO-SHLD
7 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-NO-BMI2,X86-SHLD,X86-NO-BMI2-HAVE-SHLD
8 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,+bmi2,+slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-BMI2,X86-NO-SHLD,X86-HAVE-BMI2-NO-SHLD
9 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,+bmi2,-slow-shld | FileCheck %s --check-prefixes=ALL,X86,X86-BMI2,X86-SHLD,X86-HAVE-BMI2-HAVE-SHLD
11 ; no @load_1byte_chunk_of_1byte_alloca
13 define void @load_1byte_chunk_of_2byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
14 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca:
15 ; X64-NO-BMI2: # %bb.0:
16 ; X64-NO-BMI2-NEXT: movzwl (%rdi), %eax
17 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
18 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
19 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
20 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
21 ; X64-NO-BMI2-NEXT: retq
23 ; X64-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca:
25 ; X64-BMI2-NEXT: movzwl (%rdi), %eax
26 ; X64-BMI2-NEXT: shll $3, %esi
27 ; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
28 ; X64-BMI2-NEXT: movb %al, (%rdx)
31 ; X86-NO-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca:
32 ; X86-NO-BMI2: # %bb.0:
33 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
34 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
35 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
36 ; X86-NO-BMI2-NEXT: movzwl (%eax), %eax
37 ; X86-NO-BMI2-NEXT: shll $3, %ecx
38 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
39 ; X86-NO-BMI2-NEXT: shrl %cl, %eax
40 ; X86-NO-BMI2-NEXT: movb %al, (%edx)
41 ; X86-NO-BMI2-NEXT: retl
43 ; X86-BMI2-LABEL: load_1byte_chunk_of_2byte_alloca:
45 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
46 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
47 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
48 ; X86-BMI2-NEXT: movzwl (%edx), %edx
49 ; X86-BMI2-NEXT: shll $3, %ecx
50 ; X86-BMI2-NEXT: shrxl %ecx, %edx, %ecx
51 ; X86-BMI2-NEXT: movb %cl, (%eax)
53 %init = load <2 x i8>, ptr %src, align 1
54 %intermediate.val.frozen = freeze <2 x i8> %init
55 %intermediate.val.frozen.bits = bitcast <2 x i8> %intermediate.val.frozen to i16
56 %byteOff.tr = trunc i64 %byteOff to i16
57 %byteOff.numbits.wide = shl i16 %byteOff.tr, 3
58 %intermediate.val.frozen.bits.positioned = lshr i16 %intermediate.val.frozen.bits, %byteOff.numbits.wide
59 %intermediate.val.frozen.bits.positioned.extracted = trunc i16 %intermediate.val.frozen.bits.positioned to i8
60 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
61 store <1 x i8> %1, ptr %dst, align 1
65 ; no @load_2byte_chunk_of_2byte_alloca
; Test: load <4 x i8> from %src, bitcast to i32, lshr by %byteOff*8 bits,
; truncate to i8 and store that 1-byte chunk to %dst. With BMI2 the shift
; folds the memory operand directly into shrx.
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
67 define void @load_1byte_chunk_of_4byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
68 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca:
69 ; X64-NO-BMI2: # %bb.0:
70 ; X64-NO-BMI2-NEXT: movl (%rdi), %eax
71 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
72 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
73 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
74 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
75 ; X64-NO-BMI2-NEXT: retq
77 ; X64-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca:
79 ; X64-BMI2-NEXT: shll $3, %esi
80 ; X64-BMI2-NEXT: shrxl %esi, (%rdi), %eax
81 ; X64-BMI2-NEXT: movb %al, (%rdx)
84 ; X86-NO-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca:
85 ; X86-NO-BMI2: # %bb.0:
86 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
87 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
88 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
89 ; X86-NO-BMI2-NEXT: movl (%eax), %eax
90 ; X86-NO-BMI2-NEXT: shll $3, %ecx
91 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
92 ; X86-NO-BMI2-NEXT: shrl %cl, %eax
93 ; X86-NO-BMI2-NEXT: movb %al, (%edx)
94 ; X86-NO-BMI2-NEXT: retl
96 ; X86-BMI2-LABEL: load_1byte_chunk_of_4byte_alloca:
98 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
99 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
100 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
101 ; X86-BMI2-NEXT: shll $3, %ecx
102 ; X86-BMI2-NEXT: shrxl %ecx, (%edx), %ecx
103 ; X86-BMI2-NEXT: movb %cl, (%eax)
104 ; X86-BMI2-NEXT: retl
; IR under test: freeze, bitcast to i32, variable lshr, trunc, store.
105 %init = load <4 x i8>, ptr %src, align 1
106 %intermediate.val.frozen = freeze <4 x i8> %init
107 %intermediate.val.frozen.bits = bitcast <4 x i8> %intermediate.val.frozen to i32
108 %byteOff.tr = trunc i64 %byteOff to i32
109 %byteOff.numbits.wide = shl i32 %byteOff.tr, 3
110 %intermediate.val.frozen.bits.positioned = lshr i32 %intermediate.val.frozen.bits, %byteOff.numbits.wide
111 %intermediate.val.frozen.bits.positioned.extracted = trunc i32 %intermediate.val.frozen.bits.positioned to i8
112 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
113 store <1 x i8> %1, ptr %dst, align 1
; Test: load <4 x i8> from %src, bitcast to i32, lshr by %byteOff*8 bits,
; truncate to i16 and store that 2-byte chunk to %dst.
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
117 define void @load_2byte_chunk_of_4byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
118 ; X64-NO-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca:
119 ; X64-NO-BMI2: # %bb.0:
120 ; X64-NO-BMI2-NEXT: movl (%rdi), %eax
121 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
122 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
123 ; X64-NO-BMI2-NEXT: shrl %cl, %eax
124 ; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
125 ; X64-NO-BMI2-NEXT: retq
127 ; X64-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca:
129 ; X64-BMI2-NEXT: shll $3, %esi
130 ; X64-BMI2-NEXT: shrxl %esi, (%rdi), %eax
131 ; X64-BMI2-NEXT: movw %ax, (%rdx)
132 ; X64-BMI2-NEXT: retq
134 ; X86-NO-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca:
135 ; X86-NO-BMI2: # %bb.0:
136 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
137 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
138 ; X86-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
139 ; X86-NO-BMI2-NEXT: movl (%edx), %edx
140 ; X86-NO-BMI2-NEXT: shll $3, %ecx
141 ; X86-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
142 ; X86-NO-BMI2-NEXT: shrl %cl, %edx
143 ; X86-NO-BMI2-NEXT: movw %dx, (%eax)
144 ; X86-NO-BMI2-NEXT: retl
146 ; X86-BMI2-LABEL: load_2byte_chunk_of_4byte_alloca:
148 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
149 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
150 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
151 ; X86-BMI2-NEXT: shll $3, %ecx
152 ; X86-BMI2-NEXT: shrxl %ecx, (%edx), %ecx
153 ; X86-BMI2-NEXT: movw %cx, (%eax)
154 ; X86-BMI2-NEXT: retl
; IR under test: same shift-and-truncate pattern, i16-sized chunk.
155 %init = load <4 x i8>, ptr %src, align 1
156 %intermediate.val.frozen = freeze <4 x i8> %init
157 %intermediate.val.frozen.bits = bitcast <4 x i8> %intermediate.val.frozen to i32
158 %byteOff.tr = trunc i64 %byteOff to i32
159 %byteOff.numbits.wide = shl i32 %byteOff.tr, 3
160 %intermediate.val.frozen.bits.positioned = lshr i32 %intermediate.val.frozen.bits, %byteOff.numbits.wide
161 %intermediate.val.frozen.bits.positioned.extracted = trunc i32 %intermediate.val.frozen.bits.positioned to i16
162 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
166 ; no @load_4byte_chunk_of_4byte_alloca
; Test: load <8 x i8> from %src, bitcast to i64, lshr by %byteOff*8 bits,
; truncate to i8 and store that 1-byte chunk to %dst. On i686 the i64
; shift splits into two 32-bit halves, so the SHLD/no-SHLD and BMI2
; prefixes diverge (shrd+cmov vs. shr/shl/or+cmov vs. shrx/shlx).
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
168 define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
169 ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca:
170 ; X64-NO-BMI2: # %bb.0:
171 ; X64-NO-BMI2-NEXT: movq (%rdi), %rax
172 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
173 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
174 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
175 ; X64-NO-BMI2-NEXT: movb %al, (%rdx)
176 ; X64-NO-BMI2-NEXT: retq
178 ; X64-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca:
180 ; X64-BMI2-NEXT: shll $3, %esi
181 ; X64-BMI2-NEXT: shrxq %rsi, (%rdi), %rax
182 ; X64-BMI2-NEXT: movb %al, (%rdx)
183 ; X64-BMI2-NEXT: retq
185 ; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca:
186 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
187 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
188 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
189 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
190 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
191 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
192 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
193 ; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
194 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
195 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
196 ; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
197 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %ebx
198 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
199 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
200 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
201 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %edi
202 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
203 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
204 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
205 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
206 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
207 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %edi, %ebx
208 ; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, (%edx)
209 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
210 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
211 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
212 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
214 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca:
215 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
216 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
217 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
218 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
219 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
220 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
221 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
222 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
223 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
224 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
225 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
226 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx
227 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
228 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx
229 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movb %dl, (%eax)
230 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
231 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
233 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca:
234 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
235 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
236 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
237 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
238 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
239 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
240 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
241 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
242 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
243 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx
244 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
245 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
246 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
247 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
248 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
249 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
250 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
251 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
252 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
253 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
254 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
255 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
256 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
257 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
258 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
259 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
261 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca:
262 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
263 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
264 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
265 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
266 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
267 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
268 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
269 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
270 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
271 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
272 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
273 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
274 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
275 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
276 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %ebx
277 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movb %bl, (%eax)
278 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
279 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
280 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
; IR under test: i64-wide variable lshr of the frozen 8-byte load.
281 %init = load <8 x i8>, ptr %src, align 1
282 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
283 %intermediate.val.frozen = freeze <8 x i8> %init
284 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
285 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
286 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i8
287 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
288 store <1 x i8> %1, ptr %dst, align 1
; Test: load <8 x i8> from %src, bitcast to i64, lshr by %byteOff*8 bits,
; truncate to i16 and store that 2-byte chunk to %dst. Same 32-bit
; split-shift lowering variants on i686 as the 1-byte case above.
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
292 define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
293 ; X64-NO-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca:
294 ; X64-NO-BMI2: # %bb.0:
295 ; X64-NO-BMI2-NEXT: movq (%rdi), %rax
296 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
297 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
298 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
299 ; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
300 ; X64-NO-BMI2-NEXT: retq
302 ; X64-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca:
304 ; X64-BMI2-NEXT: shll $3, %esi
305 ; X64-BMI2-NEXT: shrxq %rsi, (%rdi), %rax
306 ; X64-BMI2-NEXT: movw %ax, (%rdx)
307 ; X64-BMI2-NEXT: retq
309 ; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca:
310 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
311 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
312 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
313 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
314 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
315 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
316 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
317 ; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
318 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
319 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi
320 ; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
321 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
322 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
323 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
324 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
325 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
326 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
327 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
328 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
329 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
330 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
331 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %ebx, %esi
332 ; X86-NO-BMI2-NO-SHLD-NEXT: movw %si, (%edx)
333 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
334 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
335 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
336 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
338 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca:
339 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
340 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
341 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
342 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
343 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
344 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
345 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
346 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
347 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
348 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
349 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
350 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
351 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
352 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
353 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax)
354 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
355 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
357 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca:
358 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
359 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
360 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
361 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
362 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
363 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
364 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
365 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
366 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
367 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx
368 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
369 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
370 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
371 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
372 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
373 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
374 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
375 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
376 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
377 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
378 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
379 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
380 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
381 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
382 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
383 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
385 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_8byte_alloca:
386 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
387 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
388 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
389 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
390 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
391 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
392 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
393 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
394 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
395 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
396 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
397 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
398 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
399 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
400 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax)
401 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
402 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
; IR under test: i64-wide variable lshr, i16-sized extracted chunk.
403 %init = load <8 x i8>, ptr %src, align 1
404 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
405 %intermediate.val.frozen = freeze <8 x i8> %init
406 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
407 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
408 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i16
409 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Test: load <8 x i8> from %src, bitcast to i64, lshr by %byteOff*8 bits,
; truncate to i32 and store that 4-byte chunk to %dst.
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
413 define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
414 ; X64-NO-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca:
415 ; X64-NO-BMI2: # %bb.0:
416 ; X64-NO-BMI2-NEXT: movq (%rdi), %rax
417 ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
418 ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
419 ; X64-NO-BMI2-NEXT: shrq %cl, %rax
420 ; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
421 ; X64-NO-BMI2-NEXT: retq
423 ; X64-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca:
425 ; X64-BMI2-NEXT: shll $3, %esi
426 ; X64-BMI2-NEXT: shrxq %rsi, (%rdi), %rax
427 ; X64-BMI2-NEXT: movl %eax, (%rdx)
428 ; X64-BMI2-NEXT: retq
430 ; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca:
431 ; X86-NO-BMI2-NO-SHLD: # %bb.0:
432 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
433 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
434 ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
435 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
436 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
437 ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
438 ; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
439 ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
440 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi
441 ; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
442 ; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
443 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
444 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
445 ; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
446 ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
447 ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
448 ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
449 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
450 ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
451 ; X86-NO-BMI2-NO-SHLD-NEXT: testb $32, %al
452 ; X86-NO-BMI2-NO-SHLD-NEXT: cmovel %ebx, %esi
453 ; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%edx)
454 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
455 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
456 ; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
457 ; X86-NO-BMI2-NO-SHLD-NEXT: retl
459 ; X86-NO-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca:
460 ; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
461 ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
462 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
463 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
464 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
465 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
466 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
467 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
468 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
469 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
470 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
471 ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
472 ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
473 ; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
474 ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax)
475 ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
476 ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
478 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca:
479 ; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
480 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
481 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
482 ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
483 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
484 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
485 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
486 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
487 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
488 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx
489 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
490 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi
491 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
492 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
493 ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
494 ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
495 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
496 ; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
497 ; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
498 ; X86-HAVE-BMI2-NO-SHLD-NEXT: testb $32, %cl
499 ; X86-HAVE-BMI2-NO-SHLD-NEXT: cmovel %edi, %edx
500 ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
501 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
502 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
503 ; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
504 ; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
506 ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_8byte_alloca:
507 ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
508 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
509 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
510 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
511 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
512 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
513 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
514 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx
515 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
516 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi
517 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
518 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
519 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl
520 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi
521 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax)
522 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
523 ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
; IR under test: i64-wide variable lshr, i32-sized extracted chunk.
524 %init = load <8 x i8>, ptr %src, align 1
525 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
526 %intermediate.val.frozen = freeze <8 x i8> %init
527 %intermediate.val.frozen.bits = bitcast <8 x i8> %intermediate.val.frozen to i64
528 %intermediate.val.frozen.bits.positioned = lshr i64 %intermediate.val.frozen.bits, %byteOff.numbits
529 %intermediate.val.frozen.bits.positioned.extracted = trunc i64 %intermediate.val.frozen.bits.positioned to i32
530 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
534 ; no @load_8byte_chunk_of_8byte_alloca
; Test: load <16 x i8> from %src, bitcast to i128, lshr by %byteOff*8
; bits (zext'd to i128), truncate to i8 and store to %dst. On x86-64 the
; i128 shift becomes a two-register funnel (shrd or shr/shl/or + cmov);
; on i686 it is lowered via a stack temporary indexed by the byte offset.
; NOTE(review): autogenerated CHECK lines; regenerate with
; update_llc_test_checks.py. Excerpt appears to drop some lines
; (closing `ret void`/`}`) -- confirm against the original test file.
536 define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
537 ; X64-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
538 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
539 ; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
540 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
541 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
542 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
543 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
544 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
545 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
546 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
547 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
548 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
549 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
550 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
551 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
552 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
553 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
554 ; X64-NO-BMI2-NO-SHLD-NEXT: movb %al, (%rdx)
555 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
557 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
558 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
559 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
560 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
561 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
562 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
563 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
564 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
565 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
566 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
567 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
568 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
569 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movb %sil, (%rdx)
570 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
572 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
573 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
574 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
575 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
576 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
577 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
578 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
579 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
580 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
581 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
582 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
583 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
584 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
585 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
586 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
587 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
588 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movb %al, (%rdx)
589 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
591 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
592 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
593 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
594 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
595 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
596 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
597 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
598 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
599 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
600 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
601 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
602 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
603 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movb %sil, (%rdx)
604 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
606 ; X86-LABEL: load_1byte_chunk_of_16byte_alloca:
608 ; X86-NEXT: subl $32, %esp
609 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
610 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
611 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
612 ; X86-NEXT: movdqu (%edx), %xmm0
613 ; X86-NEXT: shll $3, %ecx
614 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
615 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
616 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
617 ; X86-NEXT: movd %xmm0, (%esp)
618 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
619 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
620 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
621 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
622 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
623 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
624 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
625 ; X86-NEXT: shrb $3, %cl
626 ; X86-NEXT: andb $15, %cl
627 ; X86-NEXT: movzbl %cl, %ecx
628 ; X86-NEXT: movzbl (%esp,%ecx), %ecx
629 ; X86-NEXT: movb %cl, (%eax)
630 ; X86-NEXT: addl $32, %esp
; IR under test: i128-wide variable lshr of the frozen 16-byte load.
632 %init = load <16 x i8>, ptr %src, align 1
633 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
634 %intermediate.val.frozen = freeze <16 x i8> %init
635 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
636 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
637 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
638 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i8
639 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
640 store <1 x i8> %1, ptr %dst, align 1
; Test: extract a 2-byte chunk at a runtime byte offset %byteOff from a 16-byte
; (i128) value loaded from %src, and store it to %dst. The byte offset is
; converted to a bit count (byteOff * 8) and applied as an i128 lshr.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
644 define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
645 ; X64-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
646 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
647 ; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
648 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
649 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
650 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
651 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
652 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
653 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
654 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
655 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
656 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
657 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
658 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
659 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
660 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
661 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
662 ; X64-NO-BMI2-NO-SHLD-NEXT: movw %ax, (%rdx)
663 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
665 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
666 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
667 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
668 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
669 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
670 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
671 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
672 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
673 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
674 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
675 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
676 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
677 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movw %si, (%rdx)
678 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
680 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
681 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
682 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
683 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
684 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
685 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
686 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
687 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
688 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
689 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
690 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
691 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
692 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
693 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
694 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
695 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
696 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movw %ax, (%rdx)
697 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
699 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
700 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
701 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
702 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
703 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
704 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
705 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
706 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
707 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
708 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
709 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
710 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
711 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%rdx)
712 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
714 ; X86-LABEL: load_2byte_chunk_of_16byte_alloca:
716 ; X86-NEXT: subl $32, %esp
717 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
718 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
719 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
720 ; X86-NEXT: movdqu (%edx), %xmm0
721 ; X86-NEXT: shll $3, %ecx
722 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
723 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
724 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
725 ; X86-NEXT: movd %xmm0, (%esp)
726 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
727 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
728 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
729 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
730 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
731 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
732 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
733 ; X86-NEXT: shrb $3, %cl
734 ; X86-NEXT: andb $15, %cl
735 ; X86-NEXT: movzbl %cl, %ecx
736 ; X86-NEXT: movl (%esp,%ecx), %ecx
737 ; X86-NEXT: movw %cx, (%eax)
738 ; X86-NEXT: addl $32, %esp
; IR under test: freeze the loaded <16 x i8>, bitcast to i128, lshr by
; byteOff*8 bits, truncate to i16, store to %dst.
740 %init = load <16 x i8>, ptr %src, align 1
741 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
742 %intermediate.val.frozen = freeze <16 x i8> %init
743 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
744 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
745 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
746 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i16
747 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Test: extract a 4-byte chunk at a runtime byte offset %byteOff from a 16-byte
; (i128) value loaded from %src, and store it to %dst.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
751 define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
752 ; X64-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
753 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
754 ; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
755 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
756 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
757 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
758 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
759 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
760 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
761 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
762 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
763 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
764 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
765 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
766 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
767 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
768 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
769 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%rdx)
770 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
772 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
773 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
774 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
775 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
776 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
777 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
778 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
779 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
780 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
781 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
782 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
783 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
784 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%rdx)
785 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
787 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
788 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
789 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
790 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
791 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
792 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
793 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
794 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
795 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
796 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
797 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
798 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
799 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
800 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
801 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
802 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
803 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%rdx)
804 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
806 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
807 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
808 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
809 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
810 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
811 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
812 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
813 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
814 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
815 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
816 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
817 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
818 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%rdx)
819 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
821 ; X86-LABEL: load_4byte_chunk_of_16byte_alloca:
823 ; X86-NEXT: subl $32, %esp
824 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
825 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
826 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
827 ; X86-NEXT: movdqu (%edx), %xmm0
828 ; X86-NEXT: shll $3, %ecx
829 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
830 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
831 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
832 ; X86-NEXT: movd %xmm0, (%esp)
833 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
834 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
835 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
836 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
837 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
838 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
839 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
840 ; X86-NEXT: shrb $3, %cl
841 ; X86-NEXT: andb $15, %cl
842 ; X86-NEXT: movzbl %cl, %ecx
843 ; X86-NEXT: movl (%esp,%ecx), %ecx
844 ; X86-NEXT: movl %ecx, (%eax)
845 ; X86-NEXT: addl $32, %esp
; IR under test: freeze the loaded <16 x i8>, bitcast to i128, lshr by
; byteOff*8 bits, truncate to i32, store to %dst.
847 %init = load <16 x i8>, ptr %src, align 1
848 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
849 %intermediate.val.frozen = freeze <16 x i8> %init
850 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
851 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
852 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
853 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i32
854 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
; Test: extract an 8-byte chunk at a runtime byte offset %byteOff from a 16-byte
; (i128) value loaded from %src, and store it to %dst.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
858 define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
859 ; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
860 ; X64-NO-BMI2-NO-SHLD: # %bb.0:
861 ; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
862 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi
863 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
864 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
865 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi
866 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
867 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
868 ; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
869 ; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
870 ; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
871 ; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
872 ; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
873 ; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
874 ; X64-NO-BMI2-NO-SHLD-NEXT: testb $64, %sil
875 ; X64-NO-BMI2-NO-SHLD-NEXT: cmoveq %r8, %rax
876 ; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
877 ; X64-NO-BMI2-NO-SHLD-NEXT: retq
879 ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
880 ; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
881 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
882 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
883 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
884 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
885 ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
886 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
887 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
888 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
889 ; X64-NO-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
890 ; X64-NO-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
891 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
892 ; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
894 ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
895 ; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
896 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0
897 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
898 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
899 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax
900 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx
901 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rcx, %rcx
902 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %edi
903 ; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %dil
904 ; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r8
905 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rdi, %r8, %rdi
906 ; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
907 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rax, %rax
908 ; X64-HAVE-BMI2-NO-SHLD-NEXT: testb $64, %sil
909 ; X64-HAVE-BMI2-NO-SHLD-NEXT: cmoveq %rdi, %rax
910 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
911 ; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
913 ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
914 ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
915 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx
916 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0
917 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx
918 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax
919 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
920 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi
921 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
922 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rsi
923 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: testb $64, %cl
924 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: cmoveq %rax, %rsi
925 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
926 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
928 ; X86-LABEL: load_8byte_chunk_of_16byte_alloca:
930 ; X86-NEXT: subl $32, %esp
931 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
932 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
933 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
934 ; X86-NEXT: movdqu (%edx), %xmm0
935 ; X86-NEXT: shll $3, %ecx
936 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
937 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
938 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
939 ; X86-NEXT: movd %xmm0, (%esp)
940 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
941 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
942 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
943 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
944 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
945 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
946 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
947 ; X86-NEXT: shrb $3, %cl
948 ; X86-NEXT: andb $15, %cl
949 ; X86-NEXT: movzbl %cl, %ecx
950 ; X86-NEXT: movl (%esp,%ecx), %edx
951 ; X86-NEXT: movl 4(%esp,%ecx), %ecx
952 ; X86-NEXT: movl %ecx, 4(%eax)
953 ; X86-NEXT: movl %edx, (%eax)
954 ; X86-NEXT: addl $32, %esp
; IR under test: freeze the loaded <16 x i8>, bitcast to i128, lshr by
; byteOff*8 bits, truncate to i64, store to %dst.
956 %init = load <16 x i8>, ptr %src, align 1
957 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
958 %intermediate.val.frozen = freeze <16 x i8> %init
959 %intermediate.val.frozen.bits = bitcast <16 x i8> %intermediate.val.frozen to i128
960 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i128
961 %intermediate.val.frozen.bits.positioned = lshr i128 %intermediate.val.frozen.bits, %byteOff.numbits.wide
962 %intermediate.val.frozen.bits.positioned.extracted = trunc i128 %intermediate.val.frozen.bits.positioned to i64
963 store i64 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 8
967 ; no @load_16byte_chunk_of_16byte_alloca
; Test: extract a 1-byte chunk at a runtime byte offset %byteOff from a 32-byte
; (i256) value loaded from %src; the result is stored via a <1 x i8> vector.
; The 256-bit shift is lowered through a stack slot indexed by byteOff.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
969 define void @load_1byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
970 ; X64-LABEL: load_1byte_chunk_of_32byte_alloca:
972 ; X64-NEXT: movdqu (%rdi), %xmm0
973 ; X64-NEXT: movdqu 16(%rdi), %xmm1
974 ; X64-NEXT: shll $3, %esi
975 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
976 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
977 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
978 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
979 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
980 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
981 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
982 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
983 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
984 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
985 ; X64-NEXT: shrb $3, %sil
986 ; X64-NEXT: movzbl %sil, %eax
987 ; X64-NEXT: movzbl -64(%rsp,%rax), %eax
988 ; X64-NEXT: movb %al, (%rdx)
991 ; X86-LABEL: load_1byte_chunk_of_32byte_alloca:
993 ; X86-NEXT: subl $64, %esp
994 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
995 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
996 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
997 ; X86-NEXT: movdqu (%edx), %xmm0
998 ; X86-NEXT: movdqu 16(%edx), %xmm1
999 ; X86-NEXT: shll $3, %ecx
1000 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1001 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1002 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1003 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1004 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1005 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1006 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1007 ; X86-NEXT: movd %xmm0, (%esp)
1008 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1009 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1010 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1011 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1012 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1013 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1014 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1015 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1016 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1017 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1018 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1019 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1020 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1021 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1022 ; X86-NEXT: shrb $3, %cl
1023 ; X86-NEXT: movzbl %cl, %ecx
1024 ; X86-NEXT: movzbl (%esp,%ecx), %ecx
1025 ; X86-NEXT: movb %cl, (%eax)
1026 ; X86-NEXT: addl $64, %esp
; IR under test: freeze the loaded <32 x i8>, bitcast to i256, lshr by
; byteOff*8 bits, truncate to i8, store as a <1 x i8> to %dst.
1028 %init = load <32 x i8>, ptr %src, align 1
1029 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1030 %intermediate.val.frozen = freeze <32 x i8> %init
1031 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1032 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1033 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1034 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i8
1035 %1 = insertelement <1 x i8> poison, i8 %intermediate.val.frozen.bits.positioned.extracted, i64 0
1036 store <1 x i8> %1, ptr %dst, align 1
; Test: extract a 2-byte chunk at a runtime byte offset %byteOff from a 32-byte
; (i256) value loaded from %src, and store it to %dst.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
1040 define void @load_2byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1041 ; X64-LABEL: load_2byte_chunk_of_32byte_alloca:
1043 ; X64-NEXT: movdqu (%rdi), %xmm0
1044 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1045 ; X64-NEXT: shll $3, %esi
1046 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1047 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1048 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1049 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1050 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1051 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1052 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1053 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1054 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1055 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1056 ; X64-NEXT: shrb $3, %sil
1057 ; X64-NEXT: movzbl %sil, %eax
1058 ; X64-NEXT: movq -64(%rsp,%rax), %rax
1059 ; X64-NEXT: movw %ax, (%rdx)
1062 ; X86-LABEL: load_2byte_chunk_of_32byte_alloca:
1064 ; X86-NEXT: subl $64, %esp
1065 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1066 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1067 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1068 ; X86-NEXT: movdqu (%edx), %xmm0
1069 ; X86-NEXT: movdqu 16(%edx), %xmm1
1070 ; X86-NEXT: shll $3, %ecx
1071 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1072 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1073 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1074 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1075 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1076 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1077 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1078 ; X86-NEXT: movd %xmm0, (%esp)
1079 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1080 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1081 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1082 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1083 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1084 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1085 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1086 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1087 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1088 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1089 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1090 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1091 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1092 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1093 ; X86-NEXT: shrb $3, %cl
1094 ; X86-NEXT: movzbl %cl, %ecx
1095 ; X86-NEXT: movl (%esp,%ecx), %ecx
1096 ; X86-NEXT: movw %cx, (%eax)
1097 ; X86-NEXT: addl $64, %esp
; IR under test: freeze the loaded <32 x i8>, bitcast to i256, lshr by
; byteOff*8 bits, truncate to i16, store to %dst.
1099 %init = load <32 x i8>, ptr %src, align 1
1100 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1101 %intermediate.val.frozen = freeze <32 x i8> %init
1102 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1103 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1104 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1105 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i16
1106 store i16 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 2
; Test: extract a 4-byte chunk at a runtime byte offset %byteOff from a 32-byte
; (i256) value loaded from %src, and store it to %dst.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
1110 define void @load_4byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1111 ; X64-LABEL: load_4byte_chunk_of_32byte_alloca:
1113 ; X64-NEXT: movdqu (%rdi), %xmm0
1114 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1115 ; X64-NEXT: shll $3, %esi
1116 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1117 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1118 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1119 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1120 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1121 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1122 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1123 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1124 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1125 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1126 ; X64-NEXT: shrb $3, %sil
1127 ; X64-NEXT: movzbl %sil, %eax
1128 ; X64-NEXT: movl -64(%rsp,%rax), %eax
1129 ; X64-NEXT: movl %eax, (%rdx)
1132 ; X86-LABEL: load_4byte_chunk_of_32byte_alloca:
1134 ; X86-NEXT: subl $64, %esp
1135 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1136 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1137 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1138 ; X86-NEXT: movdqu (%edx), %xmm0
1139 ; X86-NEXT: movdqu 16(%edx), %xmm1
1140 ; X86-NEXT: shll $3, %ecx
1141 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1142 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1143 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1144 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1145 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1146 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1147 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1148 ; X86-NEXT: movd %xmm0, (%esp)
1149 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1150 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1151 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1152 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1153 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1154 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1155 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1156 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1157 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1158 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1159 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1160 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1161 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1162 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1163 ; X86-NEXT: shrb $3, %cl
1164 ; X86-NEXT: movzbl %cl, %ecx
1165 ; X86-NEXT: movl (%esp,%ecx), %ecx
1166 ; X86-NEXT: movl %ecx, (%eax)
1167 ; X86-NEXT: addl $64, %esp
; IR under test: freeze the loaded <32 x i8>, bitcast to i256, lshr by
; byteOff*8 bits, truncate to i32, store to %dst.
1169 %init = load <32 x i8>, ptr %src, align 1
1170 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1171 %intermediate.val.frozen = freeze <32 x i8> %init
1172 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1173 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1174 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1175 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i32
1176 store i32 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 4
; Test: extract an 8-byte chunk at a runtime byte offset %byteOff from a 32-byte
; (i256) value loaded from %src, and store it to %dst.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
1180 define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1181 ; X64-LABEL: load_8byte_chunk_of_32byte_alloca:
1183 ; X64-NEXT: movdqu (%rdi), %xmm0
1184 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1185 ; X64-NEXT: shll $3, %esi
1186 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1187 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1188 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1189 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1190 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1191 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1192 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1193 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1194 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1195 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1196 ; X64-NEXT: shrb $3, %sil
1197 ; X64-NEXT: movzbl %sil, %eax
1198 ; X64-NEXT: movq -64(%rsp,%rax), %rax
1199 ; X64-NEXT: movq %rax, (%rdx)
1202 ; X86-LABEL: load_8byte_chunk_of_32byte_alloca:
1204 ; X86-NEXT: subl $64, %esp
1205 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1206 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1207 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1208 ; X86-NEXT: movdqu (%edx), %xmm0
1209 ; X86-NEXT: movdqu 16(%edx), %xmm1
1210 ; X86-NEXT: shll $3, %ecx
1211 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1212 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1213 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1214 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1215 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1216 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1217 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1218 ; X86-NEXT: movd %xmm0, (%esp)
1219 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1220 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1221 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1222 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1223 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1224 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1225 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1226 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1227 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1228 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1229 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1230 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1231 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1232 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1233 ; X86-NEXT: shrb $3, %cl
1234 ; X86-NEXT: movzbl %cl, %ecx
1235 ; X86-NEXT: movl (%esp,%ecx), %edx
1236 ; X86-NEXT: movl 4(%esp,%ecx), %ecx
1237 ; X86-NEXT: movl %ecx, 4(%eax)
1238 ; X86-NEXT: movl %edx, (%eax)
1239 ; X86-NEXT: addl $64, %esp
; IR under test: freeze the loaded <32 x i8>, bitcast to i256, lshr by
; byteOff*8 bits, truncate to i64, store to %dst.
1241 %init = load <32 x i8>, ptr %src, align 1
1242 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1243 %intermediate.val.frozen = freeze <32 x i8> %init
1244 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1245 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1246 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1247 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i64
1248 store i64 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 8
; Test: extract a 16-byte (i128) chunk at a runtime byte offset %byteOff from a
; 32-byte (i256) value loaded from %src, and store it to %dst. On X86 the
; 128-bit result needs four 32-bit loads/stores, hence the extra saved registers.
; CHECK lines below are autogenerated by update_llc_test_checks.py — do not edit by hand.
1252 define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
1253 ; X64-LABEL: load_16byte_chunk_of_32byte_alloca:
1255 ; X64-NEXT: movdqu (%rdi), %xmm0
1256 ; X64-NEXT: movdqu 16(%rdi), %xmm1
1257 ; X64-NEXT: shll $3, %esi
1258 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
1259 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
1260 ; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
1261 ; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
1262 ; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
1263 ; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
1264 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1265 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1266 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1267 ; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
1268 ; X64-NEXT: shrb $3, %sil
1269 ; X64-NEXT: movzbl %sil, %eax
1270 ; X64-NEXT: movq -64(%rsp,%rax), %rcx
1271 ; X64-NEXT: movq -56(%rsp,%rax), %rax
1272 ; X64-NEXT: movq %rax, 8(%rdx)
1273 ; X64-NEXT: movq %rcx, (%rdx)
1276 ; X86-LABEL: load_16byte_chunk_of_32byte_alloca:
1278 ; X86-NEXT: pushl %edi
1279 ; X86-NEXT: pushl %esi
1280 ; X86-NEXT: subl $64, %esp
1281 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1282 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1283 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1284 ; X86-NEXT: movdqu (%edx), %xmm0
1285 ; X86-NEXT: movdqu 16(%edx), %xmm1
1286 ; X86-NEXT: shll $3, %ecx
1287 ; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
1288 ; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
1289 ; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
1290 ; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1291 ; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1292 ; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
1293 ; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
1294 ; X86-NEXT: movd %xmm0, (%esp)
1295 ; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
1296 ; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
1297 ; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
1298 ; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
1299 ; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
1300 ; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
1301 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1302 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1303 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1304 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1305 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1306 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1307 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1308 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
1309 ; X86-NEXT: shrb $3, %cl
1310 ; X86-NEXT: movzbl %cl, %ecx
1311 ; X86-NEXT: movl (%esp,%ecx), %edx
1312 ; X86-NEXT: movl 4(%esp,%ecx), %esi
1313 ; X86-NEXT: movl 8(%esp,%ecx), %edi
1314 ; X86-NEXT: movl 12(%esp,%ecx), %ecx
1315 ; X86-NEXT: movl %ecx, 12(%eax)
1316 ; X86-NEXT: movl %edi, 8(%eax)
1317 ; X86-NEXT: movl %esi, 4(%eax)
1318 ; X86-NEXT: movl %edx, (%eax)
1319 ; X86-NEXT: addl $64, %esp
1320 ; X86-NEXT: popl %esi
1321 ; X86-NEXT: popl %edi
; IR under test: freeze the loaded <32 x i8>, bitcast to i256, lshr by
; byteOff*8 bits, truncate to i128, store to %dst.
1323 %init = load <32 x i8>, ptr %src, align 1
1324 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
1325 %intermediate.val.frozen = freeze <32 x i8> %init
1326 %intermediate.val.frozen.bits = bitcast <32 x i8> %intermediate.val.frozen to i256
1327 %byteOff.numbits.wide = zext i64 %byteOff.numbits to i256
1328 %intermediate.val.frozen.bits.positioned = lshr i256 %intermediate.val.frozen.bits, %byteOff.numbits.wide
1329 %intermediate.val.frozen.bits.positioned.extracted = trunc i256 %intermediate.val.frozen.bits.positioned to i128
1330 store i128 %intermediate.val.frozen.bits.positioned.extracted, ptr %dst, align 16
1334 ; no @load_32byte_chunk_of_32byte_alloca
1335 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
1337 ; X86-NO-SHLD: {{.*}}
1339 ; X64-NO-SHLD: {{.*}}