#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
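
/*
 * How the sequence above works: "rep ; movsl" copies n/4 32-bit words,
 * then the movsw/movsb tail (guarded by bit 1 and bit 0 of %b4, the low
 * byte of n) copies the remaining 0-3 bytes.  For n = 7, for example,
 * that is one movsl, one movsw and one movsb.
 */
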
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
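
/*
 * Worked example of the dispatch above (illustrative, old-GCC path
 * only): memcpy(dst, src, 128) with a compile-time-constant 128 calls
 * the out-of-line __memcpy(), while memcpy(dst, src, 8) goes to
 * __builtin_memcpy(), which the compiler can expand to a couple of
 * register moves.
 */
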
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
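
/*
 * Illustrative use of the KASAN overrides above: a file built with
 * "KASAN_SANITIZE_foo.o := n" in its Makefile ("foo.o" is a made-up
 * name) is compiled without -fsanitize=kernel-address, so
 * __SANITIZE_ADDRESS__ is undefined there and a plain
 *
 *	memcpy(dst, src, len);
 *
 * expands to __memcpy(dst, src, len), skipping the instrumented wrapper.
 */
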
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
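
/*
 * mcsafe_key is a static key: the branch in memcpy_mcsafe() below is
 * patched in place at runtime, so the common case (no machine check
 * recovery) costs only a NOP.  It is expected to be flipped (e.g. via
 * static_branch_inc()) by the MCE setup code on capable systems.
 */
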
/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for fail
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
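
/*
 * Hedged usage sketch (hypothetical buffer names): a driver reading
 * persistent memory can abort the I/O instead of consuming a fatal
 * machine check when it touches poisoned memory:
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */
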
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
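
/*
 * Usage note (a sketch, not a statement of the implementation): on
 * persistent memory, memcpy_flushcache() writes the destination such
 * that the data is not left dirty in the CPU cache, e.g.
 *
 *	memcpy_flushcache(pmem_dst, src, len);
 *	wmb();	// order the data before publishing metadata
 *
 * avoiding a separate cache-flush loop after a plain memcpy().
 */
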
#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */