; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=sse2,-sse4.2 | FileCheck %s --check-prefixes=GPR,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=sse4.2,-avx | FileCheck %s --check-prefixes=GPR,SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=avx,-avx512f | FileCheck %s --check-prefixes=GPR,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=avx512f | FileCheck %s --check-prefixes=GPR,AVX512

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind

; /////////////////////////////////////////////////////////////////////////////

define void @memset_1(ptr %a, i8 %value) nounwind {
; GPR-LABEL: memset_1:
; GPR:       # %bb.0:
; GPR-NEXT:    movb %sil, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1, i1 0)
  ret void
}

define void @memset_2(ptr %a, i8 %value) nounwind {
; GPR-LABEL: memset_2:
; GPR:       # %bb.0:
; GPR-NEXT:    movzbl %sil, %eax
; GPR-NEXT:    shll $8, %esi
; GPR-NEXT:    orl %esi, %eax
; GPR-NEXT:    movw %ax, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 2, i1 0)
  ret void
}

define void @memset_4(ptr %a, i8 %value) nounwind {
; GPR-LABEL: memset_4:
; GPR:       # %bb.0:
; GPR-NEXT:    movzbl %sil, %eax
; GPR-NEXT:    imull $16843009, %eax, %eax # imm = 0x1010101
; GPR-NEXT:    movl %eax, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 4, i1 0)
  ret void
}

define void @memset_8(ptr %a, i8 %value) nounwind {
; GPR-LABEL: memset_8:
; GPR:       # %bb.0:
; GPR-NEXT:    movzbl %sil, %eax
; GPR-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; GPR-NEXT:    imulq %rax, %rcx
; GPR-NEXT:    movq %rcx, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 8, i1 0)
  ret void
}

define void @memset_16(ptr %a, i8 %value) nounwind {
; SSE2-LABEL: memset_16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movzbl %sil, %eax
; SSE2-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE2-NEXT:    imulq %rax, %rcx
; SSE2-NEXT:    movq %rcx, 8(%rdi)
; SSE2-NEXT:    movq %rcx, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: memset_16:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqu %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: memset_16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: memset_16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd %esi, %xmm0
; AVX512-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX512-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 16, i1 0)
  ret void
}

define void @memset_32(ptr %a, i8 %value) nounwind {
; SSE2-LABEL: memset_32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movzbl %sil, %eax
; SSE2-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE2-NEXT:    imulq %rax, %rcx
; SSE2-NEXT:    movq %rcx, 24(%rdi)
; SSE2-NEXT:    movq %rcx, 16(%rdi)
; SSE2-NEXT:    movq %rcx, 8(%rdi)
; SSE2-NEXT:    movq %rcx, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: memset_32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE4-NEXT:    movdqu %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: memset_32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, 16(%rdi)
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: memset_32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd %esi, %xmm0
; AVX512-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX512-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 32, i1 0)
  ret void
}

define void @memset_64(ptr %a, i8 %value) nounwind {
; SSE2-LABEL: memset_64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movzbl %sil, %eax
; SSE2-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE2-NEXT:    imulq %rax, %rcx
; SSE2-NEXT:    movq %rcx, 56(%rdi)
; SSE2-NEXT:    movq %rcx, 48(%rdi)
; SSE2-NEXT:    movq %rcx, 40(%rdi)
; SSE2-NEXT:    movq %rcx, 32(%rdi)
; SSE2-NEXT:    movq %rcx, 24(%rdi)
; SSE2-NEXT:    movq %rcx, 16(%rdi)
; SSE2-NEXT:    movq %rcx, 8(%rdi)
; SSE2-NEXT:    movq %rcx, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: memset_64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqu %xmm0, 48(%rdi)
; SSE4-NEXT:    movdqu %xmm0, 32(%rdi)
; SSE4-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE4-NEXT:    movdqu %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: memset_64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: memset_64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    movzbl %sil, %eax
; AVX512-NEXT:    imull $16843009, %eax, %eax # imm = 0x1010101
; AVX512-NEXT:    vpbroadcastd %eax, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 64, i1 0)
  ret void
}

; /////////////////////////////////////////////////////////////////////////////

define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
; SSE2-LABEL: aligned_memset_16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movd %esi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_memset_16:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqa %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_memset_16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_memset_16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd %esi, %xmm0
; AVX512-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 16 %a, i8 %value, i64 16, i1 0)
  ret void
}

define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
; SSE2-LABEL: aligned_memset_32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movd %esi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT:    movdqa %xmm0, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_memset_32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqa %xmm0, 16(%rdi)
; SSE4-NEXT:    movdqa %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_memset_32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqa %xmm0, 16(%rdi)
; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_memset_32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd %esi, %xmm0
; AVX512-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX512-NEXT:    vmovdqa %ymm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 32 %a, i8 %value, i64 32, i1 0)
  ret void
}

define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; SSE2-LABEL: aligned_memset_64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movd %esi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT:    movdqa %xmm0, 48(%rdi)
; SSE2-NEXT:    movdqa %xmm0, 32(%rdi)
; SSE2-NEXT:    movdqa %xmm0, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_memset_64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movd %esi, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pshufb %xmm1, %xmm0
; SSE4-NEXT:    movdqa %xmm0, 48(%rdi)
; SSE4-NEXT:    movdqa %xmm0, 32(%rdi)
; SSE4-NEXT:    movdqa %xmm0, 16(%rdi)
; SSE4-NEXT:    movdqa %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_memset_64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %esi, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vmovaps %ymm0, 32(%rdi)
; AVX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_memset_64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    movzbl %sil, %eax
; AVX512-NEXT:    imull $16843009, %eax, %eax # imm = 0x1010101
; AVX512-NEXT:    vpbroadcastd %eax, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 %value, i64 64, i1 0)
  ret void
}

; /////////////////////////////////////////////////////////////////////////////

define void @bzero_1(ptr %a) nounwind {
; GPR-LABEL: bzero_1:
; GPR:       # %bb.0:
; GPR-NEXT:    movb $0, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 1, i1 0)
  ret void
}

define void @bzero_2(ptr %a) nounwind {
; GPR-LABEL: bzero_2:
; GPR:       # %bb.0:
; GPR-NEXT:    movw $0, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 2, i1 0)
  ret void
}

define void @bzero_4(ptr %a) nounwind {
; GPR-LABEL: bzero_4:
; GPR:       # %bb.0:
; GPR-NEXT:    movl $0, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 4, i1 0)
  ret void
}

define void @bzero_8(ptr %a) nounwind {
; GPR-LABEL: bzero_8:
; GPR:       # %bb.0:
; GPR-NEXT:    movq $0, (%rdi)
; GPR-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 8, i1 0)
  ret void
}

define void @bzero_16(ptr %a) nounwind {
; SSE2-LABEL: bzero_16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq $0, 8(%rdi)
; SSE2-NEXT:    movq $0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: bzero_16:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movups %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: bzero_16:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovups %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: bzero_16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovups %xmm0, (%rdi)
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 16, i1 0)
  ret void
}

define void @bzero_32(ptr %a) nounwind {
; SSE2-LABEL: bzero_32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq $0, 24(%rdi)
; SSE2-NEXT:    movq $0, 16(%rdi)
; SSE2-NEXT:    movq $0, 8(%rdi)
; SSE2-NEXT:    movq $0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: bzero_32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movups %xmm0, 16(%rdi)
; SSE4-NEXT:    movups %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: bzero_32:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: bzero_32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovups %ymm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 32, i1 0)
  ret void
}

define void @bzero_64(ptr %a) nounwind {
; SSE2-LABEL: bzero_64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq $0, 56(%rdi)
; SSE2-NEXT:    movq $0, 48(%rdi)
; SSE2-NEXT:    movq $0, 40(%rdi)
; SSE2-NEXT:    movq $0, 32(%rdi)
; SSE2-NEXT:    movq $0, 24(%rdi)
; SSE2-NEXT:    movq $0, 16(%rdi)
; SSE2-NEXT:    movq $0, 8(%rdi)
; SSE2-NEXT:    movq $0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: bzero_64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movups %xmm0, 48(%rdi)
; SSE4-NEXT:    movups %xmm0, 32(%rdi)
; SSE4-NEXT:    movups %xmm0, 16(%rdi)
; SSE4-NEXT:    movups %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: bzero_64:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: bzero_64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovups %zmm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 64, i1 0)
  ret void
}

; /////////////////////////////////////////////////////////////////////////////

define void @aligned_bzero_16(ptr %a) nounwind {
; SSE2-LABEL: aligned_bzero_16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm0, %xmm0
; SSE2-NEXT:    movaps %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_bzero_16:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movaps %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_bzero_16:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovaps %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_bzero_16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovaps %xmm0, (%rdi)
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 16 %a, i8 0, i64 16, i1 0)
  ret void
}

define void @aligned_bzero_32(ptr %a) nounwind {
; SSE2-LABEL: aligned_bzero_32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm0, %xmm0
; SSE2-NEXT:    movaps %xmm0, 16(%rdi)
; SSE2-NEXT:    movaps %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_bzero_32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movaps %xmm0, 16(%rdi)
; SSE4-NEXT:    movaps %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_bzero_32:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_bzero_32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovaps %ymm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 32 %a, i8 0, i64 32, i1 0)
  ret void
}

define void @aligned_bzero_64(ptr %a) nounwind {
; SSE2-LABEL: aligned_bzero_64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm0, %xmm0
; SSE2-NEXT:    movaps %xmm0, 48(%rdi)
; SSE2-NEXT:    movaps %xmm0, 32(%rdi)
; SSE2-NEXT:    movaps %xmm0, 16(%rdi)
; SSE2-NEXT:    movaps %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE4-LABEL: aligned_bzero_64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    xorps %xmm0, %xmm0
; SSE4-NEXT:    movaps %xmm0, 48(%rdi)
; SSE4-NEXT:    movaps %xmm0, 32(%rdi)
; SSE4-NEXT:    movaps %xmm0, 16(%rdi)
; SSE4-NEXT:    movaps %xmm0, (%rdi)
; SSE4-NEXT:    retq
;
; AVX-LABEL: aligned_bzero_64:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovaps %ymm0, 32(%rdi)
; AVX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: aligned_bzero_64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovaps %zmm0, (%rdi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 64, i1 0)
  ret void
}