/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 */
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kasan.h>
#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)

#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:	lwz	r7,4(r4);	\
8 ## n ## 1:	lwz	r8,8(r4);	\
8 ## n ## 2:	lwz	r9,12(r4);	\
8 ## n ## 3:	lwzu	r10,16(r4);	\
8 ## n ## 4:	stw	r7,4(r6);	\
8 ## n ## 5:	stw	r8,8(r6);	\
8 ## n ## 6:	stw	r9,12(r6);	\
8 ## n ## 7:	stwu	r10,16(r6)

#define COPY_16_BYTES_EXCODE(n)	\
9 ## n ## 0:	addi	r5,r5,-(16 * n);	\
	b	104f;		\
9 ## n ## 1:	addi	r5,r5,-(16 * n);	\
	b	105f;		\
	EX_TABLE(8 ## n ## 0b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 1b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 2b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 3b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 4b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 5b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 6b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
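/*
 * A note on the numbering scheme: COPY_16_BYTES_WITHEX(n) labels its
 * eight accesses 8<n>0 to 8<n>7, and COPY_16_BYTES_EXCODE(n) pairs each
 * of them, via EX_TABLE, with the fixup at 9<n>0 (fault on a load) or
 * 9<n>1 (fault on a store).  The fixup subtracts the 16*n bytes already
 * copied by earlier instances within the same cacheline from the
 * residual count in r5, then joins the common read/write fault handlers
 * at 104: and 105:.
 */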
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(memset16)
	rlwinm.	r0,r5,31,1,31	/* r0 = count / 2 = number of full words */
	addi	r6,r3,-4
	beq-	2f
	rlwimi	r4,r4,16,0,15	/* replicate halfword value into both halves */
	mtctr	r0
1:	stwu	r4,4(r6)
	bdnz	1b
2:	andi.	r0,r5,1		/* odd trailing halfword? */
	beqlr
	sth	r4,4(r6)
	blr
EXPORT_SYMBOL(memset16)
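/*
 * Rough C equivalent of memset16 (an illustrative sketch; the count is
 * in u16 units, matching void *memset16(u16 *, u16, size_t)):
 *
 *	void *memset16_sketch(u16 *s, u16 v, size_t n)
 *	{
 *		u32 pair = ((u32)v << 16) | v;	// what rlwimi builds in r4
 *		u32 *p = (u32 *)s;
 *		size_t i;
 *
 *		for (i = 0; i < n / 2; i++)	// the stwu word loop
 *			*p++ = pair;
 *		if (n & 1)			// odd trailing halfword: sth
 *			*(u16 *)p = v;
 *		return s;
 *	}
 *
 * Both halves of 'pair' hold the same value, so the word stores are
 * endian-neutral.
 */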
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore skip the optimised block that uses dcbz.  This
 * jump is patched once the cache is active; this is done in
 * machine_init().
 */
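/*
 * The strategy, as a rough sketch (illustrative, not the exact
 * register-level flow):
 *
 *	store words until the destination is cacheline aligned;
 *	for each remaining whole cacheline:
 *		dcbz the line;		// zeroes it in-cache, with no
 *					// read-for-ownership traffic
 *	store the tail words and bytes;
 *
 * Because dcbz can only produce zeroes, this path is used only for a
 * zero fill value, and only once the data cache is enabled.
 */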
_GLOBAL_KASAN(memset)
	cmplwi	0,r4,0			/* is the fill value zero? */
	/*
	 * Skip the optimised block until the cache is enabled.  This 'b'
	 * is replaced by a 'bne' during boot, so the normal procedure is
	 * used whenever r4 is not zero.
	 */
5:	b	2f
	patch_site	5b, patch__memset_nocache
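/*
 * patch_site records the address of instruction 5: under the symbol
 * patch__memset_nocache so that machine_init() can rewrite it once the
 * data cache is running: the unconditional 'b 2f' becomes 'bne 2f', and
 * the dcbz block is then entered exactly when the cmplwi above found a
 * zero fill value.
 */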
	clrlwi	r7,r6,32-LG_CACHELINE_BYTES	/* offset of r6 within its line */
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2		/* words to store before the first full line */
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
10:	dcbz	r7,r6
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES	/* tail within the last line */
	addi	r5,r5,4
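/*
 * Why the xori works: r6 is word aligned, so the low two bits of r7
 * (the offset within the cacheline) are clear, and XORing with
 * CACHELINE_MASK & ~3 (i.e. CACHELINE_BYTES - 4) yields
 * (CACHELINE_BYTES - 4) - r7.  After the word loop stores that many
 * bytes, r6 sits exactly 4 bytes before a line boundary, so the dcbz
 * loop can use the fixed offset in r7 (li r7,4) and always hit a
 * line-aligned address.
 */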
2:	srwi	r0,r5,2		/* fill the remaining whole words... */
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3		/* ...then the trailing bytes */
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL_KASAN(memset)
/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore jump to generic_memcpy, which doesn't use dcbz.
 * This jump is replaced by a nop once the cache is active.  This is
 * done in machine_init().
 */
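/*
 * Outline of the fast path below, as a rough sketch (the real code
 * works 4 bytes behind the pointers; names are illustrative):
 *
 *	if (regions overlap)
 *		return generic_memcpy(dst, src, len);
 *	copy bytes, then words, until dst is cacheline aligned;
 *	for each whole destination cacheline:
 *		dcbz the line;			// claim it in-cache without
 *						// reading the old contents
 *		copy into it 16 bytes at a time;
 *	copy the trailing words, then bytes;
 */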
_GLOBAL_KASAN(memmove)
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

_GLOBAL_KASAN(memcpy)
1:	b	generic_memcpy
	patch_site	1b, patch__memcpy_nocache
	add	r7,r3,r5		/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4			/* cr0.lt &= cr1.lt */
	blt	generic_memcpy		/* if regions overlap */
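/*
 * The instructions above implement the classic overlap predicate,
 * roughly (illustrative C, not part of the kernel API):
 *
 *	int regions_overlap(const char *dst, const char *src,
 *			    unsigned long len)
 *	{
 *		return src < dst + len && dst < src + len;
 *	}
 *
 * cr0.lt holds src < dst + len and cr1.lt holds dst < src + len; crand
 * combines them so a single blt can route overlapping copies to
 * generic_memcpy.
 */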
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
	subf	r5,r0,r5
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
	addi	r4,r4,1
	addi	r6,r6,1
	stb	r9,3(r6)
	bdnz	70b
61:	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
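/*
 * At 58: the alignment work is done: r0 counts whole cachelines for the
 * dcbz loop that follows, while r5 keeps only the sub-cacheline tail
 * for the trailing word and byte loops at 63: and 64:.
 */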
	li	r11,4
	mtctr	r0
	beq	63f
53:	dcbz	r11,r6
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b
63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	stwu	r0,4(r6)
	bdnz	30b
64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
	addi	r4,r4,3
	addi	r6,r6,3
40:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	40b
65:	blr
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(memmove)
EXPORT_SYMBOL_KASAN(memcpy)
EXPORT_SYMBOL_KASAN(memmove)
generic_memcpy:
	srwi.	r7,r5,3
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	2f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31		/* r7 = remaining length >> 3 */
	beq	2b
	mtctr	r7
	b	1b
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31		/* r7 = remaining length >> 3 */
	beq	2b
	mtctr	r7
	b	1b
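/*
 * backwards_memcpy applies the same word/byte scheme as generic_memcpy
 * but walks down from the top of both buffers, roughly (illustrative C):
 *
 *	while (len--)
 *		dst[len] = src[len];
 *
 * memmove dispatches here when dst > src, where a forward copy would
 * overwrite source bytes before they had been read.
 */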
_GLOBAL(__copy_tofrom_user)
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
71:	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
73:	stwu	r9,4(r6)
	bdnz	72b

	EX_TABLE(70b,100b)
	EX_TABLE(71b,101b)
	EX_TABLE(72b,102b)
	EX_TABLE(73b,103b)

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f
	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,1
	blt	24f

#if MAX_COPY_PREFETCH > 1
	/*
	 * Heuristically, for large transfers we prefetch
	 * MAX_COPY_PREFETCH cachelines ahead; for small transfers
	 * we prefetch one cacheline ahead.
	 */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
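/*
 * In rough C (illustrative only; r3 is the prefetch offset in bytes
 * ahead of the current source pointer):
 *
 *	dist = min(nr_cachelines, MAX_COPY_PREFETCH);
 *	for (i = 1; i <= dist; i++)
 *		prefetch(src + i * L1_CACHE_BYTES);	// dcbt
 *
 * dcbt is only a hint and cannot fault, even on a bad user address, so
 * the prefetches need no EX_TABLE entries; only the copy itself does.
 */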
24:	mtctr	r0
53:	dcbt	r3,r4
54:	dcbz	r11,r6
	EX_TABLE(54b,105f)
/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b

/* now do the trailing words and bytes */
63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
31:	stwu	r0,4(r6)
	bdnz	30b
64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
	addi	r4,r4,3
	addi	r6,r6,3
40:	lbzu	r0,1(r4)
41:	stbu	r0,1(r6)
	bdnz	40b
65:	li	r3,0
	blr
/* read fault, initial single-byte copy */
100:	li	r9,0
	b	90f
/* write fault, initial single-byte copy */
101:	li	r9,1
90:	subf	r5,r8,r5
	li	r3,0
	b	99f
/* read fault, initial word copy */
102:	li	r9,0
	b	91f
/* write fault, initial word copy */
103:	li	r9,1
91:	li	r3,2
	b	99f
/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
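/*
 * How the recovery works, in outline: every EX_TABLE(addr, fixup) entry
 * emitted above lands in the kernel's __ex_table.  When a load or store
 * in the loop takes an unresolvable page fault, the fault handler looks
 * the faulting instruction up there and resumes execution at the fixup
 * instead of oopsing; the fixup code then recomputes how many bytes
 * remain uncopied.
 */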
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif
/* read fault in cacheline loop */
104:	li	r9,0
	b	92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105:	li	r9,1
92:	li	r3,LG_CACHELINE_BYTES
	mfctr	r0
	b	106f
/* read fault in final word loop */
108:	li	r9,0
	b	93f
/* write fault in final word loop */
109:	li	r9,1
93:	andi.	r5,r5,3
	li	r3,2
	b	99f
/* read fault in final byte loop */
110:	li	r9,0
	b	94f
/* write fault in final byte loop */
111:	li	r9,1
94:	li	r5,0
	li	r3,0
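/*
 * dcbz is grouped with the write faults because it stores: it zeroes a
 * whole line of the destination, so an unmapped or write-protected
 * destination page faults there before any stw in the loop does.
 */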
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
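/*
 * Example: after a fault in the cacheline loop with 32-byte lines,
 * r3 = LG_CACHELINE_BYTES = 5 and ctr counts the lines still to do, so
 * with ctr = 3 and a 10-byte tail in r5, (3 << 5) + 10 = 106 bytes
 * remain; that value ends up in r3 as the "bytes not copied" result
 * that __copy_tofrom_user's callers expect.
 */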
99:	mfctr	r0
106:	slw	r3,r0,r3
	add.	r3,r3,r5
	beq	120f			/* shouldn't happen */
	cmpwi	0,r9,0
	bne	120f
/* for a read fault, first try to continue the copy one byte at a time */
	mtctr	r3
130:	lbz	r0,4(r4)
131:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	130b
/*
 * The retry either completes (ctr reaches zero) or faults again and
 * lands in the fixups here; in both cases r3 ends up holding the
 * number of bytes that could not be copied.
 */
132:	mfctr	r3
120:	blr

	EX_TABLE(30b,108b)
	EX_TABLE(31b,109b)
	EX_TABLE(40b,110b)
	EX_TABLE(41b,111b)
	EX_TABLE(130b,132b)
	EX_TABLE(131b,120b)

EXPORT_SYMBOL(__copy_tofrom_user)