/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */
#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
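/*
 * Note (added commentary, not in the original header): in the asm above,
 * the dummy outputs d0 ("=&c") and d1 ("=&D") tell the compiler that RCX
 * and RDI are clobbered. The inputs load v into AX ("a"), tie s to RDI
 * ("1") and n to RCX ("0"); "rep stosw" then stores AX at [RDI] n times,
 * advancing RDI by 2 per iteration. The stosl/stosq variants below follow
 * the same pattern with 4- and 8-byte stores.
 */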
#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
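/*
 * Example (illustration only, not part of the original header): a minimal
 * sketch of how a caller might use these fixed-width fills, here to paint
 * a 16-bit framebuffer with one pixel value. The function and parameter
 * names are hypothetical.
 */
static inline void example_fill_pixels(uint16_t *fb, uint16_t pixel,
				       size_t npixels)
{
	/* One "rep stosw" fill instead of a byte-wise memset() loop. */
	memset16(fb, pixel, npixels);
}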
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented version of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
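/*
 * Illustration (added commentary, not in the original header): for an
 * object built with instrumentation disabled in its Makefile, e.g.
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * the compiler does not define __SANITIZE_ADDRESS__, so a plain
 * memcpy(dst, src, len) in that file expands to __memcpy(dst, src, len)
 * and bypasses the KASAN-instrumented wrapper.
 */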
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
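/*
 * Example (illustration only, not part of the original header): reading
 * from memory that may contain poisoned lines, e.g. persistent memory.
 * The function name is hypothetical; -EIO assumes <linux/errno.h>.
 */
static inline int example_safe_read(void *dst, const void *src, size_t len)
{
	unsigned long rem;

	/* rem is the number of trailing bytes that were NOT copied. */
	rem = memcpy_mcsafe(dst, src, len);
	return rem ? -EIO : 0;
}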
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
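/*
 * Example (illustration only, not part of the original header): copying a
 * record toward persistent memory so the stores are pushed out of the CPU
 * cache via the non-temporal movnti path above. The names are hypothetical;
 * a real caller still needs its own ordering/durability barrier afterwards.
 */
static inline void example_write_record(void *pmem_dst, const void *rec,
					size_t len)
{
	/* Non-temporal copy: data bypasses (or is flushed from) the cache. */
	memcpy_flushcache(pmem_dst, rec, len);
}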
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */