/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)
#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:			\
	lwz	r7,4(r4);	\
8 ## n ## 1:			\
	lwz	r8,8(r4);	\
8 ## n ## 2:			\
	lwz	r9,12(r4);	\
8 ## n ## 3:			\
	lwzu	r10,16(r4);	\
8 ## n ## 4:			\
	stw	r7,4(r6);	\
8 ## n ## 5:			\
	stw	r8,8(r6);	\
8 ## n ## 6:			\
	stw	r9,12(r6);	\
8 ## n ## 7:			\
	stwu	r10,16(r6)
#define COPY_16_BYTES_EXCODE(n)			\
9 ## n ## 0:					\
	addi	r5,r5,-(16 * n);		\
	b	104f;				\
9 ## n ## 1:					\
	addi	r5,r5,-(16 * n);		\
	b	105f;				\
	EX_TABLE(8 ## n ## 0b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 1b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 2b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 3b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 4b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 5b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 6b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
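/*
 * Worked example of the scheme above, for iteration n = 1: a fault on
 * the load at label 812 is routed by its EX_TABLE entry to 910, which
 * rewinds r5 by 16 * 1 so the common fixup code can still compute the
 * bytes left, then branches to the read-fault handler at 104f; a fault
 * on one of the stores (labels 814-817) lands at 911 and takes the
 * write-fault path at 105f instead.
 */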
	.text
	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
	.stabs	"copy_32.S",N_SO,0,0,0f
0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
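/*
 * Worked example, assuming a 32-byte L1 line (L1_CACHE_SHIFT == 5):
 * CACHELINE_BYTES = 32, LG_CACHELINE_BYTES = 5, CACHELINE_MASK = 0x1f,
 * so "addr & CACHELINE_MASK" is the offset within a line and
 * "len >> LG_CACHELINE_BYTES" the number of complete lines.
 */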
_GLOBAL(memset16)
	rlwinm.	r0,r5,31,1,31	/* r0 = count >> 1 = number of full words */
	addi	r6,r3,-4
	beq-	2f
	rlwimi	r4,r4,16,0,15	/* replicate the halfword into both halves of r4 */
	mtctr	r0
1:	stwu	r4,4(r6)
	bdnz	1b
2:	andi.	r0,r5,1		/* one odd trailing halfword? */
	beqlr
	sth	r4,4(r6)
	blr
EXPORT_SYMBOL(memset16)
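/*
 * A minimal C sketch of the routine above (illustration only, ignoring
 * the alignment details of the real code):
 *
 *	void *memset16(u16 *s, u16 v, size_t n)
 *	{
 *		u32 w = ((u32)v << 16) | v;	// halfword replicated (rlwimi)
 *		u32 *p = (u32 *)s;
 *		size_t i;
 *
 *		for (i = 0; i < n / 2; i++)	// the stwu loop
 *			*p++ = w;
 *		if (n & 1)			// odd trailing halfword (sth)
 *			*(u16 *)p = v;
 *		return s;
 *	}
 */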
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 *
 * During early init, the cache might not be active yet, so dcbz cannot be
 * used.  We therefore skip the optimised block that uses dcbz.  This jump
 * is replaced by a nop once the cache is active.  This is done in
 * machine_init().
 */
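/*
 * The optimised path, as a C sketch (illustration only; dcbz_line() is a
 * hypothetical stand-in for the dcbz instruction, which zeroes a whole
 * cache line without first reading it from memory):
 *
 *	u32 *p = dst;				// assumed word-aligned here
 *	while (((unsigned long)p & CACHELINE_MASK) != 0)
 *		*p++ = 0, n -= 4;		// words up to the line boundary
 *	while (n >= CACHELINE_BYTES) {
 *		dcbz_line(p);			// zero a whole line at once
 *		p += CACHELINE_BYTES / 4;
 *		n -= CACHELINE_BYTES;
 *	}
 *	// remaining words and bytes are stored normally
 *
 * dcbz can only produce zeroes, which is why the branch below is patched
 * to 'bne' once the cache is up: a non-zero fill value in r4 must take
 * the plain store loop instead.
 */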
	/*
	 * Skip the optimised block until the cache is enabled. Will be
	 * replaced by 'bne' during boot to use the normal procedure
	 * if r4 is not zero.
	 */
_GLOBAL(memset_nocache_branch)
	b	2f

	clrlwi	r7,r6,32-LG_CACHELINE_BYTES	/* offset of dest within its cacheline */
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2		/* # word stores to reach the cacheline boundary */
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
10:	dcbz	r7,r6		/* zero a whole cacheline in one go */
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES	/* bytes left after the last full line */
EXPORT_SYMBOL(memset)
/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 *	-- paulus.
 *
 * During early init, the cache might not be active yet, so dcbz cannot be
 * used.  We therefore jump to generic_memcpy, which doesn't use dcbz.  This
 * jump is replaced by a nop once the cache is active.  This is done in
 * machine_init().
 */
	add	r7,r3,r5		/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4			/* cr0.lt &= cr1.lt */
	blt	generic_memcpy		/* if regions overlap */
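/*
 * In C terms, the sequence above implements (sketch):
 *
 *	if (src < dst + len && dst < src + len)
 *		goto generic_memcpy;		// regions overlap
 *
 * cr0.lt holds (src < dst + len) from the first cmplw, cr1.lt holds
 * (dst < src + len) from the second, and crand folds them into one
 * bit for the blt.
 */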
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
70:	lbz	r9,4(r4)		/* do some bytes */
72:	lwzu	r9,4(r4)		/* do some words */
58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
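/*
 * Bookkeeping for the two lines above, in C terms (sketch):
 *
 *	nlines = len >> LG_CACHELINE_BYTES;	// complete lines, in r0
 *	len   &= CACHELINE_MASK;		// tail for after the loop, in r5
 */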
	li	r11,4
	mtctr	r0
	beq	63f
53:	dcbz	r11,r6		/* zero the destination line instead of fetching it */
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(memmove)
	beq	2f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */

	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */

	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
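/*
 * Why "rlwinm. rT,r5,32-3,3,31" is a shift: rotating left by 32-3 bits
 * equals rotating right by 3, and the mask keeping bits 3..31 (IBM
 * numbering, bit 0 = MSB) clears the three bits that wrapped around, so
 * rT = r5 >> 3, the number of 8-byte chunks to move.  The trailing '.'
 * sets cr0 so a beq can skip the loop when the count is zero.
 */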
_GLOBAL(__copy_tofrom_user)
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
70:	lbz	r9,4(r4)		/* do some bytes */
72:	lwzu	r9,4(r4)		/* do some words */
58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	/* Here we decide how far ahead to prefetch the source */
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
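/*
 * The heuristic in C terms (sketch; nlines is the cache line count in r0):
 *
 *	ahead = (nlines > MAX_COPY_PREFETCH) ? MAX_COPY_PREFETCH : 1;
 *	for (i = 1; i <= ahead; i++)
 *		dcbt(src + i * CACHELINE_BYTES);	// touch, never faults
 *
 * i.e. prime the cache several lines ahead for big copies, but only one
 * line ahead when the copy is small enough that deeper prefetch would
 * run past the end of the source.
 */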
	/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
/* read fault, initial single-byte copy */
/* write fault, initial single-byte copy */
/* read fault, initial word copy */
/* write fault, initial word copy */
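/*
 * The numbered fixup stubs referenced by the comments above and below
 * all follow the same pattern: set r9 to 0 for a read fault or 1 for a
 * write fault, set r3 to the log2 of the item size the faulting loop was
 * moving (0 for bytes, 2 for words, LG_CACHELINE_BYTES for whole lines),
 * then branch to common code that computes the number of bytes not
 * copied as r5 + (ctr << r3) (see the comment further down).
 */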
/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif
/* read fault in cacheline loop */
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
92:	li	r3,LG_CACHELINE_BYTES
/* read fault in final word loop */
/* write fault in final word loop */
/* read fault in final byte loop */
/* write fault in final byte loop */
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
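/*
 * Worked example with hypothetical numbers: a fault in the final word
 * loop gives r3 = 2; with three words still to go (ctr = 3) and one odd
 * byte left in r5, the count comes out as 1 + (3 << 2) = 13 bytes not
 * copied.
 */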
	beq	120f			/* shouldn't happen */
/* for a read fault, first try to continue the copy one byte at a time */
/* then clear out the destination: r3 bytes starting at 4(r6) */
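/*
 * The two comments above, as a C-flavoured sketch (illustration only;
 * copy_one_byte() is a hypothetical helper that may fail on an
 * inaccessible address):
 *
 *	while (left > 0 && copy_one_byte(dst, src))
 *		dst++, src++, left--;	// salvage whatever is readable
 *	memset(dst, 0, left);		// then clear the rest of the dest
 *	return left;			// bytes not copied
 *
 * Retrying byte by byte matters when the original fault hit partway
 * through a word: everything up to the first inaccessible byte still
 * gets copied before the destination tail is cleared.
 */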
EXPORT_SYMBOL(__copy_tofrom_user)