 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Mnemonic names for arguments to memcpy/__copy_user
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 *   - src and dst don't overlap
 * memcpy uses the standard calling convention
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   - src is readable  (no exceptions when reading src)
 *   - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * arch/mips/include/asm/uaccess.h
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
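 *
 * Purely as an illustration (not part of the build; raw_copy is a
 * hypothetical primitive that stops at the first faulting access and
 * returns the number of bytes it managed to copy), the contract above
 * corresponds roughly to this C model:
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *
 *	size_t copy_from_user_model(void *dst, const void *src, size_t len)
 *	{
 *		size_t done = raw_copy(dst, src, len);	// may fault early
 *		if (done < len)		// faulting read: zero-fill the tail
 *			memset((char *)dst + done, 0, len - done);
 *		return len - done;	// uncopied count, as left in a2
 *	}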
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
#define EXC(inst_reg,addr,handler)	\
9:	inst_reg, addr;			\
	.section __ex_table,"a";	\
	PTR	9b, handler;		\
	.previous
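/*
 * For example (illustrative expansion): EXC(LOAD t0, UNIT(0)(src), l_exc)
 * emits
 *
 * 9:	LOAD	t0, UNIT(0)(src)
 *	.section __ex_table,"a"
 *	PTR	9b, l_exc
 *	.previous
 *
 * i.e. the load itself plus an exception-table pair (faulting pc, fixup),
 * so a fault at label 9 resumes execution at l_exc instead of oopsing.
 */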
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)
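/*
 * Worked example (assuming USE_DOUBLE, i.e. NBYTES == 8): FIRST(1) == 8
 * and REST(1) == 15, so the LDFIRST/LDREST pair for unit 1 covers source
 * bytes 8..15; ADDRMASK == 7 extracts the low address bits that make a
 * pointer unaligned.
 */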
 * t7 is used as a flag to note inatomic mode.
LEAF(__copy_user_inatomic)
END(__copy_user_inatomic)

 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.

LEAF(memcpy)				/* a0=dst a1=src a2=len */
	move	v0, dst			/* return value */
	li	t7, 0			/* not inatomic */
 * Note: dst & src may be unaligned, len may be 0
	#
	# Octeon doesn't care if the destination is unaligned. The hardware
	# can fix it faster than we can special case the assembly.
	#
	sltu	t0, len, NBYTES		# Check if < 1 word
	bnez	t0, copy_bytes_checklen
	and	t0, src, ADDRMASK	# Check if src unaligned
	bnez	t0, src_unaligned
	sltu	t0, len, 4*NBYTES	# Check if < 4 words
	bnez	t0, less_than_4units
	sltu	t0, len, 8*NBYTES	# Check if < 8 words
	bnez	t0, less_than_8units
	sltu	t0, len, 16*NBYTES	# Check if < 16 words
	bnez	t0, cleanup_both_aligned
	sltu	t0, len, 128+1		# Check if len < 129
	bnez	t0, 1f			# Skip prefetch if len is too short
	sltu	t0, len, 256+1		# Check if len < 257
	bnez	t0, 1f			# Skip prefetch if len is too short
	pref	0, 128(src)		# We must not prefetch invalid addresses
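	#
	# Worked bound (a sketch of why these prefetches are safe): both
	# len checks above branch to 1f first, so pref 0, 128(src) and the
	# loop's pref 0, 256(src) only run with len >= 257 bytes remaining,
	# keeping every prefetched address inside [src, src+len).
	#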
	# This is where we loop if there are more than 128 bytes left
2:	pref	0, 256(src)		# We must not prefetch invalid addresses
	# This is where we loop if we can't prefetch anymore
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 16*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p16u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p15u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p14u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p13u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p12u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p11u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
	ADD	src, src, 16*NBYTES
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
	ADD	dst, dst, 16*NBYTES
EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(-1)(dst),	s_exc_p1u)
	sltu	t0, len, 256+1		# See if we can prefetch more
	sltu	t0, len, 128		# See if we can loop one more time
	# Jump here if there are less than 16*NBYTES left.
cleanup_both_aligned:
	sltu	t0, len, 8*NBYTES
	bnez	t0, less_than_8units
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p1u)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	# Jump here if there are less than 8*NBYTES left.
	sltu	t0, len, 4*NBYTES
	bnez	t0, less_than_4units
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	ADD	src, src, 4*NBYTES
	ADD	dst, dst, 4*NBYTES
	# Jump here if there are less than 4*NBYTES left. This means
	# we may need to copy up to 3 NBYTES words.
	sltu	t0, len, 1*NBYTES
	bnez	t0, copy_bytes_checklen
	# 1) Copy NBYTES, then check length again
EXC(	LOAD	t0, 0(src),		l_exc)
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	bnez	t1, copy_bytes_checklen
	# 2) Copy NBYTES, then check length again
EXC(	LOAD	t0, 0(src),		l_exc)
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	bnez	t1, copy_bytes_checklen
	# 3) Copy NBYTES, then check length again
EXC(	LOAD	t0, 0(src),		l_exc)
	b	copy_bytes_checklen
EXC(	STORE	t0, -8(dst),		s_exc_p1u)
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz	t0, cleanup_src_unaligned
	and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
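 *
 * In C terms (a sketch only, not the generated code), each LDFIRST/LDREST
 * pair builds one register-wide word from an unaligned address:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static uint64_t load_unaligned(const void *p)
 *	{
 *		uint64_t w;
 *		memcpy(&w, p, sizeof(w));	// byte-safe unaligned load
 *		return w;
 *	}
 *
 * LDL/LDR achieve the same effect in two instructions, each pulling in
 * the bytes of the word that fall within one aligned doubleword.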
EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
EXC(	LDFIRST	t1, FIRST(1)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src),	l_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src),	l_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	ADD	dst, dst, 4*NBYTES
cleanup_src_unaligned:
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, copy_bytes
EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	/* 0 < len < NBYTES */
#define COPY_BYTE(N)			\
EXC(	lb	t0, N(src), l_exc);	\
	SUB	len, len, 1;		\
	beqz	len, done;		\
EXC(	sb	t0, N(dst), s_exc_p1)

EXC(	lb	t0, NBYTES-2(src), l_exc)
EXC(	sb	t0, NBYTES-2(dst), s_exc_p1)
 * Copy bytes from src until faulting load address (or until a
 * lb faults)
 *
 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
 * may be more than a byte beyond the last address.
 * Hence, the lb below may get an exception.
 *
 * Assumes src < THREAD_BUADDR($28)
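 *
 * Rough C equivalent of the loop below (illustrative only;
 * thread_buaddr is a hypothetical accessor for THREAD_BUADDR($28)):
 *
 *	const char *fault = thread_buaddr();
 *	while (src < fault)
 *		*dst++ = *src++;	// each byte load may itself fault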
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)
EXC(	lb	t1, 0(src),	l_exc)
	sb	t1, 0(dst)		# can't fault -- we're copy_from_user
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t7, 2f			/* Skip the zeroing out part if inatomic */
 * Here's where we rely on src and dst being incremented in tandem,
 *   See (3) above.
 * dst += (fault addr - src) to put dst at first byte to clear
	ADD	dst, t0			# compute start address in a1
 * Clear len bytes starting at dst. Can't call __bzero because it
 * might modify len. An inefficient loop for these rare times...
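 *
 * As a C sketch, this whole fixup path amounts to (names invented):
 *
 *	size_t uncopied = src_end - fault_addr;	// src_end is held in AT
 *	char *p = dst + (fault_addr - src);	// first byte not stored
 *	while (uncopied--)
 *		*p++ = 0;	// never expose stale kernel memory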
	ADD	len, len, n*NBYTES
	sltu	t0, a1, t0		# dst + len <= src -> memcpy
	sltu	t1, a0, t1		# dst >= src + len -> memcpy
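	#
	# C sketch of the dispatch (illustrative only): the two tests above
	# implement
	#
	#	if (dst + len <= src || src + len <= dst)
	#		memcpy(dst, src, len);		// no overlap: forward copy
	#	else
	#		__rmemcpy(dst, src, len);	// overlap: copy backwards
	#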
	move	v0, a0			/* return value */
	/* fall through to __rmemcpy */
LEAF(__rmemcpy)				/* a0=dst a1=src a2=len */
	beqz	t0, r_end_bytes_up	# src >= dst
	ADD	a0, a2			# dst = dst + len
	ADD	a1, a2			# src = src + len
	bnez	a2, r_end_bytes_up