/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
33 #define __HAVE_ARCH_MEMSET32
34 static inline void *memset32(uint32_t *s
, uint32_t v
, size_t n
)
37 asm volatile("rep\n\t"
39 : "=&c" (d0
), "=&D" (d1
)
40 : "a" (v
), "1" (s
), "0" (n
)
45 #define __HAVE_ARCH_MEMSET64
46 static inline void *memset64(uint64_t *s
, uint64_t v
, size_t n
)
49 asm volatile("rep\n\t"
51 : "=&c" (d0
), "=&D" (d1
)
52 : "a" (v
), "1" (s
), "0" (n
)
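/*
 * Illustrative sketch, not part of the original header: the memsetNN()
 * helpers store "n" values of the given width, not "n" bytes. Filling a
 * hypothetical 1024-entry pixel buffer with a single 32-bit colour value
 * would look like:
 *
 *	u32 pixels[1024];
 *
 *	memset32(pixels, 0x00ff00ffU, ARRAY_SIZE(pixels));
 *
 * "pixels" and the colour value are made-up names used only as an example.
 */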
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
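/*
 * Illustrative sketch, not part of the original header: in a translation
 * unit built without KASAN instrumentation (for example via
 * "KASAN_SANITIZE_slub.o := n" in the Makefile), a plain call such as
 *
 *	memset(ptr, 0, size);
 *
 * expands through the macro above into
 *
 *	__memset(ptr, 0, size);
 *
 * so the uninstrumented implementation is used directly. "ptr" and "size"
 * are made-up names used only for this example.
 */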
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
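/*
 * Usage sketch, not part of the original header: a caller copying from
 * memory that may contain a poisoned (uncorrectable) location checks the
 * return value for the number of bytes that were not copied. If the copy
 * was cut short by a machine check, fail the request:
 *
 *	unsigned long rem;
 *
 *	rem = memcpy_mcsafe(dst_buf, src_mem, len);
 *	if (rem)
 *		return -EIO;
 *
 * "dst_buf", "src_mem" and "len" are made-up names used only for this
 * example.
 */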
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif
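/*
 * Usage sketch, not part of the original header: memcpy_flushcache() is
 * intended for writes that should not be left dirty in the CPU cache, such
 * as copies into persistent memory. With a compile-time-constant size the
 * switch above collapses to one or two non-temporal stores, e.g.:
 *
 *	u64 value = 42;
 *
 *	memcpy_flushcache(pmem_dst, &value, 8);
 *
 * "pmem_dst" and "value" are made-up names used only for this example.
 */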
#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */