1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
6 ; This tests codegen time inlining/optimization of memcmp
9 @.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
11 declare dso_local i32 @memcmp(ptr, ptr, i64)
12 declare dso_local i32 @bcmp(ptr, ptr, i64)
; length2: a 2-byte memcmp is expanded inline — both halves are loaded as
; 16-bit values, byte-swapped with rolw $8 (memcmp compares big-endian-wise),
; zero-extended, and subtracted to produce the signed three-way result.
14 define i32 @length2(ptr %X, ptr %Y) nounwind !prof !14 {
17 ; X64-NEXT: movzwl (%rdi), %eax
18 ; X64-NEXT: movzwl (%rsi), %ecx
19 ; X64-NEXT: rolw $8, %ax
20 ; X64-NEXT: rolw $8, %cx
21 ; X64-NEXT: movzwl %ax, %eax
22 ; X64-NEXT: movzwl %cx, %ecx
23 ; X64-NEXT: subl %ecx, %eax
25 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 2) nounwind
; length2_eq: equality-only (result compared to 0), so no byte swap is
; needed — a single 16-bit load and cmpw suffices.
29 define i1 @length2_eq(ptr %X, ptr %Y) nounwind !prof !14 {
30 ; X64-LABEL: length2_eq:
32 ; X64-NEXT: movzwl (%rdi), %eax
33 ; X64-NEXT: cmpw (%rsi), %ax
36 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 2) nounwind
37 %c = icmp eq i32 %m, 0
; length2_eq_const: comparing against a constant string folds the load of the
; constant side into an immediate compare (0x3231 == "12" little-endian).
41 define i1 @length2_eq_const(ptr %X) nounwind !prof !14 {
42 ; X64-LABEL: length2_eq_const:
44 ; X64-NEXT: movzwl (%rdi), %eax
45 ; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
48 %m = tail call i32 @memcmp(ptr %X, ptr getelementptr inbounds ([65 x i8], ptr @.str, i32 0, i32 1), i64 2) nounwind
49 %c = icmp ne i32 %m, 0
; length2_eq_nobuiltin_attr: the nobuiltin call-site attribute must suppress
; inlining — codegen emits a real call to memcmp instead of expanding it.
53 define i1 @length2_eq_nobuiltin_attr(ptr %X, ptr %Y) nounwind !prof !14 {
54 ; X64-LABEL: length2_eq_nobuiltin_attr:
56 ; X64-NEXT: pushq %rax
57 ; X64-NEXT: movl $2, %edx
58 ; X64-NEXT: callq memcmp
59 ; X64-NEXT: testl %eax, %eax
63 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 2) nounwind nobuiltin
64 %c = icmp eq i32 %m, 0
; length3: odd-size memcmp expands to a 2-byte big-endian compare plus a
; trailing 1-byte compare (loadbb1); the res_block materializes -1/+1 from
; the unsigned comparison via setae + leal -1(%rax,%rax).
68 define i32 @length3(ptr %X, ptr %Y) nounwind !prof !14 {
71 ; X64-NEXT: movzwl (%rdi), %eax
72 ; X64-NEXT: movzwl (%rsi), %ecx
73 ; X64-NEXT: rolw $8, %ax
74 ; X64-NEXT: rolw $8, %cx
75 ; X64-NEXT: cmpw %cx, %ax
76 ; X64-NEXT: jne .LBB4_3
77 ; X64-NEXT: # %bb.1: # %loadbb1
78 ; X64-NEXT: movzbl 2(%rdi), %eax
79 ; X64-NEXT: movzbl 2(%rsi), %ecx
80 ; X64-NEXT: subl %ecx, %eax
82 ; X64-NEXT: .LBB4_3: # %res_block
84 ; X64-NEXT: movzbl %al, %eax
85 ; X64-NEXT: leal -1(%rax,%rax), %eax
87 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind
; length3_eq: equality-only form is branchless — XOR both pieces and OR the
; results together, then setne on the combined value.
91 define i1 @length3_eq(ptr %X, ptr %Y) nounwind !prof !14 {
92 ; X64-LABEL: length3_eq:
94 ; X64-NEXT: movzwl (%rdi), %eax
95 ; X64-NEXT: xorw (%rsi), %ax
96 ; X64-NEXT: movb 2(%rdi), %cl
97 ; X64-NEXT: xorb 2(%rsi), %cl
98 ; X64-NEXT: movzbl %cl, %ecx
99 ; X64-NEXT: orw %ax, %cx
100 ; X64-NEXT: setne %al
102 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind
103 %c = icmp ne i32 %m, 0
; length4: a single 32-bit load per side, bswapl to get big-endian order,
; then a branchless -1/0/+1 via seta/sbbl (no compare-and-branch needed).
107 define i32 @length4(ptr %X, ptr %Y) nounwind !prof !14 {
108 ; X64-LABEL: length4:
110 ; X64-NEXT: movl (%rdi), %ecx
111 ; X64-NEXT: movl (%rsi), %edx
112 ; X64-NEXT: bswapl %ecx
113 ; X64-NEXT: bswapl %edx
114 ; X64-NEXT: xorl %eax, %eax
115 ; X64-NEXT: cmpl %edx, %ecx
117 ; X64-NEXT: sbbl $0, %eax
119 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
; length4_eq: equality-only, so the byte swaps are dropped entirely.
123 define i1 @length4_eq(ptr %X, ptr %Y) nounwind !prof !14 {
124 ; X64-LABEL: length4_eq:
126 ; X64-NEXT: movl (%rdi), %eax
127 ; X64-NEXT: cmpl (%rsi), %eax
128 ; X64-NEXT: setne %al
130 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
131 %c = icmp ne i32 %m, 0
; length4_eq_const: constant side folds to an immediate memory compare
; (0x34333231 == "1234" little-endian).
135 define i1 @length4_eq_const(ptr %X) nounwind !prof !14 {
136 ; X64-LABEL: length4_eq_const:
138 ; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
141 %m = tail call i32 @memcmp(ptr %X, ptr getelementptr inbounds ([65 x i8], ptr @.str, i32 0, i32 1), i64 4) nounwind
142 %c = icmp eq i32 %m, 0
; length5: 4-byte byte-swapped compare followed by a 1-byte tail compare
; (loadbb1), with the shared res_block producing -1/+1 via setae.
146 define i32 @length5(ptr %X, ptr %Y) nounwind !prof !14 {
147 ; X64-LABEL: length5:
149 ; X64-NEXT: movl (%rdi), %eax
150 ; X64-NEXT: movl (%rsi), %ecx
151 ; X64-NEXT: bswapl %eax
152 ; X64-NEXT: bswapl %ecx
153 ; X64-NEXT: cmpl %ecx, %eax
154 ; X64-NEXT: jne .LBB9_3
155 ; X64-NEXT: # %bb.1: # %loadbb1
156 ; X64-NEXT: movzbl 4(%rdi), %eax
157 ; X64-NEXT: movzbl 4(%rsi), %ecx
158 ; X64-NEXT: subl %ecx, %eax
160 ; X64-NEXT: .LBB9_3: # %res_block
161 ; X64-NEXT: setae %al
162 ; X64-NEXT: movzbl %al, %eax
163 ; X64-NEXT: leal -1(%rax,%rax), %eax
165 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind
; length5_eq: branchless equality — XOR the 4-byte and 1-byte pieces and OR
; them together before a single setne.
169 define i1 @length5_eq(ptr %X, ptr %Y) nounwind !prof !14 {
170 ; X64-LABEL: length5_eq:
172 ; X64-NEXT: movl (%rdi), %eax
173 ; X64-NEXT: xorl (%rsi), %eax
174 ; X64-NEXT: movb 4(%rdi), %cl
175 ; X64-NEXT: xorb 4(%rsi), %cl
176 ; X64-NEXT: movzbl %cl, %ecx
177 ; X64-NEXT: orl %eax, %ecx
178 ; X64-NEXT: setne %al
180 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind
181 %c = icmp ne i32 %m, 0
; length8: same pattern as length4 but with 64-bit loads and bswapq;
; the three-way result comes from the branchless seta/sbbl idiom.
185 define i32 @length8(ptr %X, ptr %Y) nounwind !prof !14 {
186 ; X64-LABEL: length8:
188 ; X64-NEXT: movq (%rdi), %rcx
189 ; X64-NEXT: movq (%rsi), %rdx
190 ; X64-NEXT: bswapq %rcx
191 ; X64-NEXT: bswapq %rdx
192 ; X64-NEXT: xorl %eax, %eax
193 ; X64-NEXT: cmpq %rdx, %rcx
195 ; X64-NEXT: sbbl $0, %eax
197 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 8) nounwind
; length8_eq: one 64-bit load and compare, no byte swap for equality.
201 define i1 @length8_eq(ptr %X, ptr %Y) nounwind !prof !14 {
202 ; X64-LABEL: length8_eq:
204 ; X64-NEXT: movq (%rdi), %rax
205 ; X64-NEXT: cmpq (%rsi), %rax
208 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 8) nounwind
209 %c = icmp eq i32 %m, 0
; length8_eq_const: an 8-byte immediate does not fit a cmpq immediate field,
; so it is materialized with movabsq first (0x37...30 == "01234567").
213 define i1 @length8_eq_const(ptr %X) nounwind !prof !14 {
214 ; X64-LABEL: length8_eq_const:
216 ; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
217 ; X64-NEXT: cmpq %rax, (%rdi)
218 ; X64-NEXT: setne %al
220 %m = tail call i32 @memcmp(ptr %X, ptr @.str, i64 8) nounwind
221 %c = icmp ne i32 %m, 0
; length12_eq: branchless equality — 8-byte XOR plus 4-byte XOR, OR'd
; together into one setne.
225 define i1 @length12_eq(ptr %X, ptr %Y) nounwind !prof !14 {
226 ; X64-LABEL: length12_eq:
228 ; X64-NEXT: movq (%rdi), %rax
229 ; X64-NEXT: xorq (%rsi), %rax
230 ; X64-NEXT: movl 8(%rdi), %ecx
231 ; X64-NEXT: xorl 8(%rsi), %ecx
232 ; X64-NEXT: orq %rax, %rcx
233 ; X64-NEXT: setne %al
235 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind
236 %c = icmp ne i32 %m, 0
; length12: three-way compare over an 8-byte then a 4-byte chunk, both
; byte-swapped; a mismatch jumps to the shared res_block which computes
; -1/+1 from the unsigned comparison of the differing chunk.
240 define i32 @length12(ptr %X, ptr %Y) nounwind !prof !14 {
241 ; X64-LABEL: length12:
243 ; X64-NEXT: movq (%rdi), %rcx
244 ; X64-NEXT: movq (%rsi), %rdx
245 ; X64-NEXT: bswapq %rcx
246 ; X64-NEXT: bswapq %rdx
247 ; X64-NEXT: cmpq %rdx, %rcx
248 ; X64-NEXT: jne .LBB15_2
249 ; X64-NEXT: # %bb.1: # %loadbb1
250 ; X64-NEXT: movl 8(%rdi), %ecx
251 ; X64-NEXT: movl 8(%rsi), %edx
252 ; X64-NEXT: bswapl %ecx
253 ; X64-NEXT: bswapl %edx
254 ; X64-NEXT: xorl %eax, %eax
255 ; X64-NEXT: cmpq %rdx, %rcx
256 ; X64-NEXT: je .LBB15_3
257 ; X64-NEXT: .LBB15_2: # %res_block
258 ; X64-NEXT: xorl %eax, %eax
259 ; X64-NEXT: cmpq %rdx, %rcx
260 ; X64-NEXT: setae %al
261 ; X64-NEXT: leal -1(%rax,%rax), %eax
262 ; X64-NEXT: .LBB15_3: # %endblock
264 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind
268 ; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
; length16: expanded as two 8-byte byte-swapped chunk compares with the
; shared res_block/endblock structure (same shape as length12).
270 define i32 @length16(ptr %X, ptr %Y) nounwind !prof !14 {
271 ; X64-LABEL: length16:
273 ; X64-NEXT: movq (%rdi), %rcx
274 ; X64-NEXT: movq (%rsi), %rdx
275 ; X64-NEXT: bswapq %rcx
276 ; X64-NEXT: bswapq %rdx
277 ; X64-NEXT: cmpq %rdx, %rcx
278 ; X64-NEXT: jne .LBB16_2
279 ; X64-NEXT: # %bb.1: # %loadbb1
280 ; X64-NEXT: movq 8(%rdi), %rcx
281 ; X64-NEXT: movq 8(%rsi), %rdx
282 ; X64-NEXT: bswapq %rcx
283 ; X64-NEXT: bswapq %rdx
284 ; X64-NEXT: xorl %eax, %eax
285 ; X64-NEXT: cmpq %rdx, %rcx
286 ; X64-NEXT: je .LBB16_3
287 ; X64-NEXT: .LBB16_2: # %res_block
288 ; X64-NEXT: xorl %eax, %eax
289 ; X64-NEXT: cmpq %rdx, %rcx
290 ; X64-NEXT: setae %al
291 ; X64-NEXT: leal -1(%rax,%rax), %eax
292 ; X64-NEXT: .LBB16_3: # %endblock
294 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind
; length16_eq: a 16-byte equality uses one XMM compare — SSE2 goes through
; pcmpeqb/pmovmskb against the all-ones mask, AVX uses vpxor + vptest.
298 define i1 @length16_eq(ptr %x, ptr %y) nounwind !prof !14 {
299 ; X64-SSE2-LABEL: length16_eq:
301 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
302 ; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
303 ; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
304 ; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
305 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
306 ; X64-SSE2-NEXT: setne %al
307 ; X64-SSE2-NEXT: retq
309 ; X64-AVX-LABEL: length16_eq:
311 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
312 ; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
313 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
314 ; X64-AVX-NEXT: setne %al
316 %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 16) nounwind
317 %cmp = icmp ne i32 %call, 0
; length16_eq_const: the constant operand is loaded from a constant-pool
; entry ({{\.?LCPI...}} regex keeps the check label-name agnostic).
321 define i1 @length16_eq_const(ptr %X) nounwind !prof !14 {
322 ; X64-SSE2-LABEL: length16_eq_const:
324 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
325 ; X64-SSE2-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
326 ; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
327 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
328 ; X64-SSE2-NEXT: sete %al
329 ; X64-SSE2-NEXT: retq
331 ; X64-AVX-LABEL: length16_eq_const:
333 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
334 ; X64-AVX-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
335 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
336 ; X64-AVX-NEXT: sete %al
338 %m = tail call i32 @memcmp(ptr %X, ptr @.str, i64 16) nounwind
339 %c = icmp eq i32 %m, 0
343 ; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914
; length24: the three-way form is NOT expanded at this size — it becomes a
; plain tail call (jmp) to the memcmp library routine.
345 define i32 @length24(ptr %X, ptr %Y) nounwind !prof !14 {
346 ; X64-LABEL: length24:
348 ; X64-NEXT: movl $24, %edx
349 ; X64-NEXT: jmp memcmp # TAILCALL
350 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 24) nounwind
; length24_eq: equality IS still expanded — a 16-byte XMM compare plus an
; 8-byte movq-loaded compare, combined with pand (SSE2) or vpor (AVX).
354 define i1 @length24_eq(ptr %x, ptr %y) nounwind !prof !14 {
355 ; X64-SSE2-LABEL: length24_eq:
357 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
358 ; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
359 ; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
360 ; X64-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
361 ; X64-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
362 ; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
363 ; X64-SSE2-NEXT: pand %xmm1, %xmm2
364 ; X64-SSE2-NEXT: pmovmskb %xmm2, %eax
365 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
366 ; X64-SSE2-NEXT: sete %al
367 ; X64-SSE2-NEXT: retq
369 ; X64-AVX-LABEL: length24_eq:
371 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
372 ; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
373 ; X64-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
374 ; X64-AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
375 ; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
376 ; X64-AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
377 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
378 ; X64-AVX-NEXT: sete %al
380 %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 24) nounwind
381 %cmp = icmp eq i32 %call, 0
; length24_eq_const: same shape with the constant halves taken from
; constant-pool entries.
385 define i1 @length24_eq_const(ptr %X) nounwind !prof !14 {
386 ; X64-SSE2-LABEL: length24_eq_const:
388 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
389 ; X64-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
390 ; X64-SSE2-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
391 ; X64-SSE2-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
392 ; X64-SSE2-NEXT: pand %xmm1, %xmm0
393 ; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
394 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
395 ; X64-SSE2-NEXT: setne %al
396 ; X64-SSE2-NEXT: retq
398 ; X64-AVX-LABEL: length24_eq_const:
400 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
401 ; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
402 ; X64-AVX-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
403 ; X64-AVX-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
404 ; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
405 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
406 ; X64-AVX-NEXT: setne %al
408 %m = tail call i32 @memcmp(ptr %X, ptr @.str, i64 24) nounwind
409 %c = icmp ne i32 %m, 0
; length32: three-way form stays a library tail call at this size.
413 define i32 @length32(ptr %X, ptr %Y) nounwind !prof !14 {
414 ; X64-LABEL: length32:
416 ; X64-NEXT: movl $32, %edx
417 ; X64-NEXT: jmp memcmp # TAILCALL
418 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 32) nounwind
422 ; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
; length32_eq: SSE2 uses two 16-byte pcmpeqb compares combined with pand;
; with AVX the whole 32 bytes fit one YMM xor + vptest (AVX1 uses the
; float-domain vmovups/vxorps forms, AVX2 the integer vmovdqu/vpxor forms),
; and vzeroupper is emitted before returning.
424 define i1 @length32_eq(ptr %x, ptr %y) nounwind !prof !14 {
425 ; X64-SSE2-LABEL: length32_eq:
427 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
428 ; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
429 ; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
430 ; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
431 ; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
432 ; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
433 ; X64-SSE2-NEXT: pand %xmm2, %xmm0
434 ; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
435 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
436 ; X64-SSE2-NEXT: sete %al
437 ; X64-SSE2-NEXT: retq
439 ; X64-AVX1-LABEL: length32_eq:
441 ; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
442 ; X64-AVX1-NEXT: vxorps (%rsi), %ymm0, %ymm0
443 ; X64-AVX1-NEXT: vptest %ymm0, %ymm0
444 ; X64-AVX1-NEXT: sete %al
445 ; X64-AVX1-NEXT: vzeroupper
446 ; X64-AVX1-NEXT: retq
448 ; X64-AVX2-LABEL: length32_eq:
450 ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
451 ; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
452 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0
453 ; X64-AVX2-NEXT: sete %al
454 ; X64-AVX2-NEXT: vzeroupper
455 ; X64-AVX2-NEXT: retq
456 %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 32) nounwind
457 %cmp = icmp eq i32 %call, 0
; length32_eq_const: same expansion with the constant side in the pool.
461 define i1 @length32_eq_const(ptr %X) nounwind !prof !14 {
462 ; X64-SSE2-LABEL: length32_eq_const:
464 ; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
465 ; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
466 ; X64-SSE2-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
467 ; X64-SSE2-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
468 ; X64-SSE2-NEXT: pand %xmm1, %xmm0
469 ; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
470 ; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
471 ; X64-SSE2-NEXT: setne %al
472 ; X64-SSE2-NEXT: retq
474 ; X64-AVX1-LABEL: length32_eq_const:
476 ; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
477 ; X64-AVX1-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
478 ; X64-AVX1-NEXT: vptest %ymm0, %ymm0
479 ; X64-AVX1-NEXT: setne %al
480 ; X64-AVX1-NEXT: vzeroupper
481 ; X64-AVX1-NEXT: retq
483 ; X64-AVX2-LABEL: length32_eq_const:
485 ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
486 ; X64-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
487 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0
488 ; X64-AVX2-NEXT: setne %al
489 ; X64-AVX2-NEXT: vzeroupper
490 ; X64-AVX2-NEXT: retq
491 %m = tail call i32 @memcmp(ptr %X, ptr @.str, i64 32) nounwind
492 %c = icmp ne i32 %m, 0
; length64: three-way form stays a library tail call.
496 define i32 @length64(ptr %X, ptr %Y) nounwind !prof !14 {
497 ; X64-LABEL: length64:
499 ; X64-NEXT: movl $64, %edx
500 ; X64-NEXT: jmp memcmp # TAILCALL
501 %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 64) nounwind
; length64_eq: 64 bytes exceeds what SSE2 will expand, so SSE2 falls back
; to calling memcmp; AVX1/AVX2 expand it as two 32-byte YMM xors OR'd
; together and tested with vptest.
505 define i1 @length64_eq(ptr %x, ptr %y) nounwind !prof !14 {
506 ; X64-SSE2-LABEL: length64_eq:
508 ; X64-SSE2-NEXT: pushq %rax
509 ; X64-SSE2-NEXT: movl $64, %edx
510 ; X64-SSE2-NEXT: callq memcmp
511 ; X64-SSE2-NEXT: testl %eax, %eax
512 ; X64-SSE2-NEXT: setne %al
513 ; X64-SSE2-NEXT: popq %rcx
514 ; X64-SSE2-NEXT: retq
516 ; X64-AVX1-LABEL: length64_eq:
518 ; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
519 ; X64-AVX1-NEXT: vmovups 32(%rdi), %ymm1
520 ; X64-AVX1-NEXT: vxorps 32(%rsi), %ymm1, %ymm1
521 ; X64-AVX1-NEXT: vxorps (%rsi), %ymm0, %ymm0
522 ; X64-AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
523 ; X64-AVX1-NEXT: vptest %ymm0, %ymm0
524 ; X64-AVX1-NEXT: setne %al
525 ; X64-AVX1-NEXT: vzeroupper
526 ; X64-AVX1-NEXT: retq
528 ; X64-AVX2-LABEL: length64_eq:
530 ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
531 ; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
532 ; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1
533 ; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
534 ; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
535 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0
536 ; X64-AVX2-NEXT: setne %al
537 ; X64-AVX2-NEXT: vzeroupper
538 ; X64-AVX2-NEXT: retq
539 %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 64) nounwind
540 %cmp = icmp ne i32 %call, 0
; length64_eq_const: same split — SSE2 calls memcmp against @.str, AVX
; expands with two constant-pool YMM operands.
544 define i1 @length64_eq_const(ptr %X) nounwind !prof !14 {
545 ; X64-SSE2-LABEL: length64_eq_const:
547 ; X64-SSE2-NEXT: pushq %rax
548 ; X64-SSE2-NEXT: movl $.L.str, %esi
549 ; X64-SSE2-NEXT: movl $64, %edx
550 ; X64-SSE2-NEXT: callq memcmp
551 ; X64-SSE2-NEXT: testl %eax, %eax
552 ; X64-SSE2-NEXT: sete %al
553 ; X64-SSE2-NEXT: popq %rcx
554 ; X64-SSE2-NEXT: retq
556 ; X64-AVX1-LABEL: length64_eq_const:
558 ; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
559 ; X64-AVX1-NEXT: vmovups 32(%rdi), %ymm1
560 ; X64-AVX1-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
561 ; X64-AVX1-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
562 ; X64-AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
563 ; X64-AVX1-NEXT: vptest %ymm0, %ymm0
564 ; X64-AVX1-NEXT: sete %al
565 ; X64-AVX1-NEXT: vzeroupper
566 ; X64-AVX1-NEXT: retq
568 ; X64-AVX2-LABEL: length64_eq_const:
570 ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
571 ; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
572 ; X64-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
573 ; X64-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
574 ; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
575 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0
576 ; X64-AVX2-NEXT: sete %al
577 ; X64-AVX2-NEXT: vzeroupper
578 ; X64-AVX2-NEXT: retq
579 %m = tail call i32 @memcmp(ptr %X, ptr @.str, i64 64) nounwind
580 %c = icmp eq i32 %m, 0
; bcmp_length2: bcmp only reports zero/nonzero, so it can expand to the
; cheaper equality form (cmpw + setne) instead of the three-way sequence
; that @length2 needs for memcmp.
584 define i32 @bcmp_length2(ptr %X, ptr %Y) nounwind !prof !14 {
585 ; X64-LABEL: bcmp_length2:
587 ; X64-NEXT: movzwl (%rdi), %ecx
588 ; X64-NEXT: xorl %eax, %eax
589 ; X64-NEXT: cmpw (%rsi), %cx
590 ; X64-NEXT: setne %al
592 %m = tail call i32 @bcmp(ptr %X, ptr %Y, i64 2) nounwind
; ProfileSummary module metadata plus the !14 entry-count node attached via
; !prof on every function above. !14 marks the functions with
; function_entry_count 0 — presumably to exercise memcmp expansion under
; cold-function profile data; confirm against the test's intent/history.
596 !llvm.module.flags = !{!0}
597 !0 = !{i32 1, !"ProfileSummary", !1}
598 !1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
599 !2 = !{!"ProfileFormat", !"InstrProf"}
600 !3 = !{!"TotalCount", i64 10000}
601 !4 = !{!"MaxCount", i64 10}
602 !5 = !{!"MaxInternalCount", i64 1}
603 !6 = !{!"MaxFunctionCount", i64 1000}
604 !7 = !{!"NumCounts", i64 3}
605 !8 = !{!"NumFunctions", i64 3}
606 !9 = !{!"DetailedSummary", !10}
607 !10 = !{!11, !12, !13}
608 !11 = !{i32 10000, i64 100, i32 1}
609 !12 = !{i32 999000, i64 100, i32 1}
610 !13 = !{i32 999999, i64 1, i32 2}
611 !14 = !{!"function_entry_count", i64 0}