// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>
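
/*
 * These helpers provide cache-flushing user-copy and memcpy variants for
 * persistent memory; they are only built when the architecture selects
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE (see the #ifdef below).
 */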

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
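	/*
	 * boot_cpu_data.x86_clflush_size is the CPU cache line size; the loop
	 * below rounds the start address down to a cache line boundary and
	 * issues a write-back for each line in the range.
	 */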
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
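
/*
 * arch_wb_cache_pmem() is the arch hook the libnvdimm/pmem code uses to
 * write dirty cache lines back to persistent memory (write-back only, no
 * invalidate).
 */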
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
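
/*
 * Copy from user space to a persistent memory destination, making sure the
 * destination range is written back to memory. Like __copy_user_nocache(),
 * the return value is the number of bytes that could not be copied.
 */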
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
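
/*
 * memcpy() variant for persistent memory: the bulk of the copy is done with
 * non-temporal MOVNTI stores that bypass the cache; only the unaligned
 * head/tail bytes go through the cache and are flushed explicitly.
 *
 * Illustrative usage sketch (not part of this file): callers typically go
 * through the memcpy_flushcache() wrapper and follow the copy with a write
 * barrier before relying on persistence, e.g.:
 *
 *	memcpy_flushcache(pmem_dst, buf, len);
 *	pmem_wmb();
 */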
void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}
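
	/*
	 * From here on the destination is 8-byte aligned. The loops below use
	 * non-temporal MOVNTI stores, which bypass the cache, so these chunks
	 * do not need an explicit write-back.
	 */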
100 asm("movq (%0), %%r8\n"
102 "movq 16(%0), %%r10\n"
103 "movq 24(%0), %%r11\n"
104 "movnti %%r8, (%1)\n"
105 "movnti %%r9, 8(%1)\n"
106 "movnti %%r10, 16(%1)\n"
107 "movnti %%r11, 24(%1)\n"
108 :: "r" (source
), "r" (dest
)
109 : "memory", "r8", "r9", "r10", "r11");

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
#endif