// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
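	/*
	 * Zero qwords first, then the remaining bytes. %rcx carries the
	 * current loop count and %rdi the destination pointer; on a fault in
	 * the qword loop the fixup at 3: rebuilds the number of bytes left
	 * (remaining qwords * 8 + the byte tail) in %[size8], which is what
	 * the asm hands back in 'size'.
	 */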
25 " testq %[size8],%[size8]\n"
28 "0: movq $0,(%[dst])\n"
30 " decl %%ecx ; jnz 0b\n"
31 "4: movq %[size1],%%rcx\n"
32 " testl %%ecx,%%ecx\n"
34 "1: movb $0,(%[dst])\n"
36 " decl %%ecx ; jnz 1b\n"
38 ".section .fixup,\"ax\"\n"
39 "3: lea 0(%[size1],%[size8],8),%[size8]\n"
		_ASM_EXTABLE_UA(0b, 3b)
		_ASM_EXTABLE_UA(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

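/*
 * clear_user() is the checked variant: it validates the user range with
 * access_ok() before handing the work to __clear_user().
 */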
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

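/*
 * Everything below implements CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE: copies
 * whose destination may be persistent memory either bypass the CPU cache
 * with non-temporal stores or are followed by an explicit CLWB write-back,
 * so no dirty data for the range is left behind in the cache.
 */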
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

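	/*
	 * Round the start address down to a cache line boundary so a
	 * partially covered first line is written back too, then step
	 * through the range one cache line at a time.
	 */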
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
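		/* Flush the cache line covering an unaligned head of the buffer. */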
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

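		/*
		 * Likewise, if the tail is not a multiple of 8 bytes it was
		 * copied through the cache, so flush the cache line holding
		 * the last byte as well.
		 */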
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

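	/*
	 * The movnti loops below use non-temporal stores that bypass the
	 * cache, so the aligned bulk of the copy needs no explicit CLWB.
	 */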
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

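/*
 * Copy from a struct page into a flushcache destination, mapping the page
 * with kmap_atomic() for the duration of the copy.
 */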
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif