/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;

	/* Copy n/4 dwords with rep movsl, then mop up the 0-3 tail bytes. */
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
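
/*
 * Illustrative sketch only, not part of this header: the asm above is
 * roughly equivalent to the following C, which copies whole 32-bit words
 * first and then the remaining 0-3 tail bytes.
 *
 *	char *d = to;
 *	const char *s = from;
 *	size_t i;
 *
 *	for (i = 0; i < n / 4; i++, d += 4, s += 4)
 *		*(uint32_t *)d = *(const uint32_t *)s;
 *	if (n & 2) {
 *		*(uint16_t *)d = *(const uint16_t *)s;
 *		d += 2;
 *		s += 2;
 *	}
 *	if (n & 1)
 *		*d = *s;
 */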

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;

	/* rep stosw stores n copies of the 16-bit value in %ax to [s]. */
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
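
/*
 * Illustrative usage sketch, not part of this header: memset32() fills a
 * buffer with a repeating 32-bit pattern, e.g. painting one hypothetical
 * XRGB scanline opaque black.
 *
 *	uint32_t line[640];
 *
 *	memset32(line, 0xff000000, ARRAY_SIZE(line));
 */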

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
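
/*
 * Illustrative usage sketch, not part of this header: a caller reading from
 * possibly-poisoned memory can turn a partial copy into an error code. The
 * function name below is hypothetical.
 *
 *	static int example_read(void *dst, const void *src, size_t len)
 *	{
 *		unsigned long rem = memcpy_mcsafe(dst, src, len);
 *
 *		return rem ? -EIO : 0;
 *	}
 */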

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
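
/*
 * Assumed context, not stated in this header: memcpy_flushcache() is for
 * writes that must reach durable media (e.g. persistent memory), where a
 * plain memcpy() could leave the data stranded in dirty cache lines. A
 * hypothetical call:
 *
 *	memcpy_flushcache(pmem_dst, buf, len);
 */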

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */