//===-- Implementation of memcmp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_MEMCMP_IMPLEMENTATIONS_H
10 #define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_MEMCMP_IMPLEMENTATIONS_H
12 #include "src/__support/common.h"
13 #include "src/__support/macros/optimization.h" // LIBC_UNLIKELY LIBC_LOOP_NOUNROLL
14 #include "src/__support/macros/properties/architectures.h"
15 #include "src/string/memory_utils/op_generic.h"
16 #include "src/string/memory_utils/utils.h" // CPtr MemcmpReturnType
18 #include <stddef.h> // size_t
20 #if defined(LIBC_TARGET_ARCH_IS_X86)
21 #include "src/string/memory_utils/x86_64/memcmp_implementations.h"
22 #elif defined(LIBC_TARGET_ARCH_IS_AARCH64)
23 #include "src/string/memory_utils/aarch64/memcmp_implementations.h"
26 namespace __llvm_libc
{
28 [[maybe_unused
]] LIBC_INLINE MemcmpReturnType
29 inline_memcmp_byte_per_byte(CPtr p1
, CPtr p2
, size_t offset
, size_t count
) {
31 for (; offset
< count
; ++offset
)
32 if (auto value
= generic::Memcmp
<1>::block(p1
+ offset
, p2
+ offset
))
34 return MemcmpReturnType::ZERO();
37 [[maybe_unused
]] LIBC_INLINE MemcmpReturnType
38 inline_memcmp_aligned_access_64bit(CPtr p1
, CPtr p2
, size_t count
) {
39 constexpr size_t kAlign
= sizeof(uint64_t);
40 if (count
<= 2 * kAlign
)
41 return inline_memcmp_byte_per_byte(p1
, p2
, 0, count
);
42 size_t bytes_to_p1_align
= distance_to_align_up
<kAlign
>(p1
);
43 if (auto value
= inline_memcmp_byte_per_byte(p1
, p2
, 0, bytes_to_p1_align
))
45 size_t offset
= bytes_to_p1_align
;
46 size_t p2_alignment
= distance_to_align_down
<kAlign
>(p2
+ offset
);
47 for (; offset
< count
- kAlign
; offset
+= kAlign
) {
49 if (p2_alignment
== 0)
50 b
= load64_aligned
<uint64_t>(p2
, offset
);
51 else if (p2_alignment
== 4)
52 b
= load64_aligned
<uint32_t, uint32_t>(p2
, offset
);
53 else if (p2_alignment
== 2)
54 b
= load64_aligned
<uint16_t, uint16_t, uint16_t, uint16_t>(p2
, offset
);
56 b
= load64_aligned
<uint8_t, uint16_t, uint16_t, uint16_t, uint8_t>(
58 uint64_t a
= load64_aligned
<uint64_t>(p1
, offset
);
60 // TODO use cmp_neq_uint64_t from D148717 once it's submitted.
61 return Endian::to_big_endian(a
) < Endian::to_big_endian(b
) ? -1 : 1;
64 return inline_memcmp_byte_per_byte(p1
, p2
, offset
, count
);
67 [[maybe_unused
]] LIBC_INLINE MemcmpReturnType
68 inline_memcmp_aligned_access_32bit(CPtr p1
, CPtr p2
, size_t count
) {
69 constexpr size_t kAlign
= sizeof(uint32_t);
70 if (count
<= 2 * kAlign
)
71 return inline_memcmp_byte_per_byte(p1
, p2
, 0, count
);
72 size_t bytes_to_p1_align
= distance_to_align_up
<kAlign
>(p1
);
73 if (auto value
= inline_memcmp_byte_per_byte(p1
, p2
, 0, bytes_to_p1_align
))
75 size_t offset
= bytes_to_p1_align
;
76 size_t p2_alignment
= distance_to_align_down
<kAlign
>(p2
+ offset
);
77 for (; offset
< count
- kAlign
; offset
+= kAlign
) {
79 if (p2_alignment
== 0)
80 b
= load32_aligned
<uint32_t>(p2
, offset
);
81 else if (p2_alignment
== 2)
82 b
= load32_aligned
<uint16_t, uint16_t>(p2
, offset
);
84 b
= load32_aligned
<uint8_t, uint16_t, uint8_t>(p2
, offset
);
85 uint32_t a
= load32_aligned
<uint32_t>(p1
, offset
);
87 // TODO use cmp_uint32_t from D148717 once it's submitted.
88 // We perform the difference as an uint64_t.
89 const int64_t diff
= static_cast<int64_t>(Endian::to_big_endian(a
)) -
90 static_cast<int64_t>(Endian::to_big_endian(b
));
91 // And reduce the uint64_t into an uint32_t.
92 return static_cast<int32_t>((diff
>> 1) | (diff
& 0xFFFF));
95 return inline_memcmp_byte_per_byte(p1
, p2
, offset
, count
);
98 LIBC_INLINE MemcmpReturnType
inline_memcmp(CPtr p1
, CPtr p2
, size_t count
) {
99 #if defined(LIBC_TARGET_ARCH_IS_X86)
100 return inline_memcmp_x86(p1
, p2
, count
);
101 #elif defined(LIBC_TARGET_ARCH_IS_AARCH64)
102 return inline_memcmp_aarch64(p1
, p2
, count
);
103 #elif defined(LIBC_TARGET_ARCH_IS_RISCV64)
104 return inline_memcmp_aligned_access_64bit(p1
, p2
, count
);
105 #elif defined(LIBC_TARGET_ARCH_IS_RISCV32)
106 return inline_memcmp_aligned_access_32bit(p1
, p2
, count
);
108 return inline_memcmp_byte_per_byte(p1
, p2
, 0, count
);
112 LIBC_INLINE
int inline_memcmp(const void *p1
, const void *p2
, size_t count
) {
113 return static_cast<int>(inline_memcmp(reinterpret_cast<CPtr
>(p1
),
114 reinterpret_cast<CPtr
>(p2
), count
));
117 } // namespace __llvm_libc
119 #endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_MEMCMP_IMPLEMENTATIONS_H