 * User Space Access Routines
 *
 * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * These routines still have plenty of room for optimization
 * (word & doubleword load/store, dual issue, store hints, etc.).
 * The following routines assume that space register 3 (sr3) contains
 * the space id associated with the current user's address space.
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
 * get_sr gets the appropriate space value into
 * sr1 for kernel/user space access, depending
 * on the flag stored in the task structure.
	ldw	TI_SEGMENT(%r1),%r22

	.macro	fixup_branch lbl
 * unsigned long lclear_user(void *to, unsigned long n)
 *
 * Returns 0 for success; otherwise, returns the number of bytes
 * not transferred.
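/*
 * Illustrative only, not part of the original sources: a minimal C sketch
 * of how a caller could consume the return value described above, assuming
 * kernel context.  The wrapper name and the use of -EFAULT are this
 * sketch's own choices.
 *
 *	static inline int clear_user_or_fault(void __user *to, unsigned long n)
 *	{
 *		// lclear_user() returns the number of bytes it could not zero
 *		return lclear_user(to, n) ? -EFAULT : 0;
 *	}
 */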
ENTRY_CFI(lclear_user)
	comib,=,n	0,%r25,$lclu_done
	addib,<>	-1,%r25,$lclu_loop
1:	stbs,ma		%r0,1(%sr1,%r26)
ENDPROC_CFI(lclear_user)

2:	fixup_branch	$lclu_done

	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
 * long lstrnlen_user(char *s, long n)
 *
 * Returns 0 if an exception occurs before the zero byte is found or N is reached,
 * N+1 if N would be exceeded,
 * else strlen + 1 (i.e. includes the zero byte).
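/*
 * Illustrative only, not part of the original sources: a minimal C sketch
 * decoding the three return-value cases listed above, assuming kernel
 * context.  The helper name and the choice of error codes are this
 * sketch's own.
 *
 *	static long user_string_len(const char __user *s, long n)
 *	{
 *		long ret = lstrnlen_user(s, n);
 *
 *		if (ret == 0)		// faulted before the NUL or the limit
 *			return -EFAULT;
 *		if (ret > n)		// ret == n + 1: no NUL within n bytes
 *			return -ENAMETOOLONG;
 *		return ret - 1;		// string length, excluding the NUL
 *	}
 */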
ENTRY_CFI(lstrnlen_user)
	comib,=		0,%r25,$lslen_nzero
1:	ldbs,ma		1(%sr1,%r26),%r1
	comib,=,n	0,%r1,$lslen_done
	addib,<>	-1,%r25,$lslen_loop
2:	ldbs,ma		1(%sr1,%r26),%r1
	ldo		1(%r26),%r26	/* special case for N == 0 */
ENDPROC_CFI(lstrnlen_user)

3:	fixup_branch	$lslen_done
	copy		%r24,%r26	/* reset r26 so 0 is returned on fault */

	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
 * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 *
 * Inputs:
 * - sr1 already contains the space of the source region
 * - sr2 already contains the space of the destination region
 *
 * Returns:
 * - number of bytes that could not be copied.
 *   On success, this will be zero.
 *
 * This code is based on a C implementation of a copy routine written by
 * Randolph Chung, which in turn was derived from glibc.
 *
 * Several strategies are tried to get the best performance for various
 * conditions. In the optimal case, we copy in loops that move 32 or 16 bytes
 * at a time using general registers. Unaligned copies are handled either by
 * aligning the destination and then using a shift-and-write method, or in a
 * few cases by falling back to a byte-at-a-time copy.
 *
 * Testing with various alignments and buffer sizes shows that this code is
 * often >10x faster than a simple byte-at-a-time copy, even for strangely
 * aligned operands. It is interesting to note that the glibc version of memcpy
 * (written in C) is actually quite fast already. This routine is able to beat
 * it by 30-40% for aligned copies because of the loop unrolling, but in some
 * cases the glibc version is still slightly faster. This lends credibility to
 * the claim that gcc can generate very good code as long as we are careful.
 *
 * Possible optimizations:
 * - add cache prefetching
 * - try not to use the post-increment address modifiers; they may create
 *   additional interlocks. The assumption is that these were only efficient
 *   on older machines (pre-PA8000 processors).
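/*
 * Illustrative only, not part of the original sources: a compact C sketch of
 * the same-alignment fast path described above (align to a word boundary,
 * then move 16 bytes per iteration with word loads and stores, then finish
 * byte by byte).  It ignores space registers, the CONFIG_64BIT doubleword
 * variant and the per-access fault fixups, and the function name is this
 * sketch's own.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	// Precondition: dst and src have the same alignment modulo 4.
 *	static size_t word_copy(unsigned char *dst, const unsigned char *src,
 *				size_t len)
 *	{
 *		// copy single bytes until dst (and therefore src) is aligned
 *		while (len && ((uintptr_t)dst & 3)) {
 *			*dst++ = *src++;
 *			len--;
 *		}
 *
 *		// unrolled loop: four 32-bit words (16 bytes) per iteration
 *		while (len >= 16) {
 *			uint32_t t0 = ((const uint32_t *)src)[0];
 *			uint32_t t1 = ((const uint32_t *)src)[1];
 *			uint32_t t2 = ((const uint32_t *)src)[2];
 *			uint32_t t3 = ((const uint32_t *)src)[3];
 *
 *			((uint32_t *)dst)[0] = t0;
 *			((uint32_t *)dst)[1] = t1;
 *			((uint32_t *)dst)[2] = t2;
 *			((uint32_t *)dst)[3] = t3;
 *			src += 16;
 *			dst += 16;
 *			len -= 16;
 *		}
 *
 *		// tail bytes
 *		while (len) {
 *			*dst++ = *src++;
 *			len--;
 *		}
 *		return 0;	// bytes not copied; plain C cannot fault
 *	}
 */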
	/* Last destination address */

	/* short copy with less than 16 bytes? */
	cmpib,COND(>>=),n 15,len,.Lbyte_loop

	/* same alignment? */
	cmpib,<>,n	0,t1,.Lunaligned_copy

	/* only do 64-bit copies if we can get aligned. */
	cmpib,<>,n	0,t1,.Lalign_loop32

	/* loop until we are 64-bit aligned */
	cmpib,=,n	0,t1,.Lcopy_loop_16_start
20:	ldb,ma		1(srcspc,src),t1
21:	stb,ma		t1,1(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

.Lcopy_loop_16_start:
	cmpb,COND(>>=),n t0,len,.Lword_loop
10:	ldd		0(srcspc,src),t1
11:	ldd		8(srcspc,src),t2
12:	std,ma		t1,8(dstspc,dst)
13:	std,ma		t2,8(dstspc,dst)
14:	ldd		0(srcspc,src),t1
15:	ldd		8(srcspc,src),t2
16:	std,ma		t1,8(dstspc,dst)
17:	std,ma		t2,8(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)

	cmpib,COND(>>=),n 3,len,.Lbyte_loop
20:	ldw,ma		4(srcspc,src),t1
21:	stw,ma		t1,4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

#endif /* CONFIG_64BIT */

	/* loop until we are 32-bit aligned */
	cmpib,=,n	0,t1,.Lcopy_loop_8
20:	ldb,ma		1(srcspc,src),t1
21:	stb,ma		t1,1(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

	cmpib,COND(>>=),n 15,len,.Lbyte_loop
10:	ldw		0(srcspc,src),t1
11:	ldw		4(srcspc,src),t2
12:	stw,ma		t1,4(dstspc,dst)
13:	stw,ma		t2,4(dstspc,dst)
14:	ldw		8(srcspc,src),t1
15:	ldw		12(srcspc,src),t2
16:	stw,ma		t1,4(dstspc,dst)
17:	stw,ma		t2,4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)

	cmpclr,COND(<>) len,%r0,%r0
20:	ldb		0(srcspc,src),t1
21:	stb,ma		t1,1(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
	/* src and dst are not aligned the same way. */
	/* need to go the hard way */

	/* align until dst is 32-bit word aligned */
	cmpib,=,n	0,t1,.Lcopy_dstaligned
20:	ldb		0(srcspc,src),t1
21:	stb,ma		t1,1(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

	/* store src, dst and len in a safe place */

	/* len now needs to hold the number of words to copy */

	/*
	 * Copy from a not-aligned src to an aligned dst using shifts.
	 * Handles 4 words per loop.
	 */
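/*
 * Illustrative only, not part of the original sources: a C sketch of the
 * shift-and-write loop implemented below, assuming a big-endian machine
 * (as PA-RISC is), a word-aligned dst and a src with a nonzero byte
 * misalignment.  It has no fault handling, the names are this sketch's own,
 * and, like the assembly, it rounds src down to a word boundary and may
 * therefore touch a few bytes outside the exact source range.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static void shift_copy(uint32_t *dst, const unsigned char *src,
 *			       size_t nwords)
 *	{
 *		unsigned int off = (uintptr_t)src & 3;	// 1, 2 or 3
 *		const uint32_t *s = (const uint32_t *)(src - off);
 *		unsigned int sh = 8 * off;		// plays the role of %sar
 *		uint32_t hi = *s++;	// aligned word holding src's first bytes
 *
 *		while (nwords--) {
 *			uint32_t lo = *s++;
 *			// take 32 bits starting 'off' bytes into hi:lo,
 *			// i.e. what the shrpw instructions below compute
 *			*dst++ = (hi << sh) | (lo >> (32 - sh));
 *			hi = lo;
 *		}
 *	}
 */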
	/* Make src aligned by rounding it down. */

	cmpb,COND(=)	%r0,len,.Lcda_finish

1:	ldw,ma		4(srcspc,src), a3
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:	ldw,ma		4(srcspc,src), a0
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)

1:	ldw,ma		4(srcspc,src), a2
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:	ldw,ma		4(srcspc,src), a3
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)

	cmpb,COND(=),n	%r0,len,.Ldo0

1:	ldw,ma		4(srcspc,src), a0
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
	shrpw		a2, a3, %sar, t0
1:	stw,ma		t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

1:	ldw,ma		4(srcspc,src), a1
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
	shrpw		a3, a0, %sar, t0
1:	stw,ma		t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

1:	ldw,ma		4(srcspc,src), a2
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
	shrpw		a0, a1, %sar, t0
1:	stw,ma		t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

1:	ldw,ma		4(srcspc,src), a3
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
	shrpw		a1, a2, %sar, t0
1:	stw,ma		t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

	cmpb,COND(<>)	%r0,len,.Ldo4

	shrpw		a2, a3, %sar, t0
1:	stw,ma		t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)

	/* calculate new src, dst and len and jump to byte-copy loop */

1:	ldw,ma		4(srcspc,src), a0
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:	ldw,ma		4(srcspc,src), a1
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)

1:	ldw,ma		4(srcspc,src), a1
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1:	ldw,ma		4(srcspc,src), a2
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)

	/* fault exception fixup handlers: */
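	/*
	 * When the second load of a pair faults (labels 11 and 15 above), the
	 * first word has already been loaded into t1 but not stored yet.  The
	 * .Lcopy16_fault / .Lcopy8_fault handlers therefore store t1 before
	 * heading to .Lcopy_done, so the returned count of bytes not copied
	 * stays accurate.
	 */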
10:	std,ma		t1,8(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)

10:	stw,ma		t1,4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)

ENDPROC_CFI(pa_memcpy)