 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 * Quick'n'dirty IP checksum ...
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
#endif	/* USE_DOUBLE */
#define UNIT(unit)  ((unit)*NBYTES)
#define ADDC(sum,reg)						\
#define ADDC32(sum,reg)						\
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
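/*
 * ADDC's body is not shown here; conventionally it is an add that folds the
 * carry of the 2's complement addition back into the running sum, which is
 * what a 1's complement (Internet) checksum needs.  A hedged C sketch of
 * that step (the function name is illustrative, not part of this file):
 *
 *	static unsigned long addc(unsigned long sum, unsigned long reg)
 *	{
 *		sum += reg;
 *		if (sum < reg)		// unsigned wrap-around means a carry out
 *			sum += 1;	// fold it back in (end-around carry)
 *		return sum;
 *	}
 */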
 * a1: length of the area to checksum
 * a2: partial checksum
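/*
 * What the routine computes, roughly: a 1's complement (RFC 1071 style) sum
 * of the buffer, seeded with the partial checksum passed in a2.  A hedged C
 * sketch that ignores the byte-order and odd-alignment handling done below
 * (the helper name is illustrative, not a kernel API):
 *
 *	#include <string.h>
 *
 *	static unsigned int csum_partial_ref(const unsigned char *buf, int len,
 *					     unsigned int sum)
 *	{
 *		unsigned long long acc = sum;	// wide accumulator; fold at the end
 *
 *		while (len > 1) {
 *			unsigned short w;
 *			memcpy(&w, buf, 2);	// one 16-bit word at a time
 *			acc += w;
 *			buf += 2;
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte
 *			acc += *buf;
 *		while (acc >> 16)		// fold carries into the low 16 bits
 *			acc = (acc & 0xffff) + (acc >> 16);
 *		return (unsigned int)acc;
 *	}
 */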
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	andi	t7, src, 0x1			/* odd buffer? */
	beqz	t7, .Lword_align
	LONG_SUBU	a1, a1, 0x1
	PTR_ADDU	src, src, 0x1
	beqz	t8, .Ldword_align
	LONG_SUBU	a1, a1, 0x2
	PTR_ADDU	src, src, 0x2
	bnez	t8, .Ldo_end_words
	beqz	t8, .Lqword_align
	LONG_SUBU	a1, a1, 0x4
	PTR_ADDU	src, src, 0x4
	beqz	t8, .Loword_align
	LONG_SUBU	a1, a1, 0x8
	LONG_SUBU	a1, a1, 0x8
	PTR_ADDU	src, src, 0x8
	beqz	t8, .Lbegin_movement
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40
	beqz	t2, .Ldo_end_words
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x20
	beqz	t8, .Lsmall_csumcpy
	LONG_SUBU	t8, t8, 0x1
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	/* unknown src alignment and < 8 bytes to go */
	/* Still a full word to go */
	dsll	t1, t1, 32		/* clear lower 32bit */
	/* Still a halfword to go */
	/* odd buffer alignment? */
#ifdef CONFIG_CPU_MIPSR2
	beqz	t7, 1f			/* odd buffer alignment? */
	/* Add the passed partial csum. */
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 * The exception handler for loads requires that:
 *  1- AT contains the address of the byte just past the end of the source
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
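/*
 * Behaviourally, the nocheck variant is just a copy followed by a checksum
 * of the copied bytes; the asm below does both in a single pass.  A hedged C
 * sketch (it reuses the illustrative csum_partial_ref helper sketched near
 * the top of this file and does no fault handling, which plain C cannot
 * express):
 *
 *	static unsigned int csum_partial_copy_nocheck_ref(const void *src,
 *							  void *dst, int len,
 *							  unsigned int sum)
 *	{
 *		memcpy(dst, src, len);			// copy ...
 *		return csum_partial_ref(dst, len, sum);	// ... then sum the copy
 *	}
 */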
#define EXC(inst_reg,addr,handler)		\
	.section __ex_table,"a";		\
#endif	/* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#define LDFIRST LOADL
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
LEAF(__csum_partial_copy_user)
	PTR_ADDU	AT, src, len	/* See (1) above. */
FEXPORT(csum_partial_copy_nocheck)
 * Note: dst & src may be unaligned, len may be 0
 *
 * The "issue break"s below are very approximate.
 * Issue delays for dcache fills will perturb the schedule, as will
 * load queue full replay traps, etc.
 *
 * If len < NBYTES use byte operations.
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned
	bnez	t0, .Lsrc_unaligned_dst_aligned
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
	SUB	len, 8*NBYTES		# subtract here for bgez loop
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
EXC(	LOAD	t4, UNIT(4)(src), .Ll_exc_copy)
EXC(	LOAD	t5, UNIT(5)(src), .Ll_exc_copy)
EXC(	LOAD	t6, UNIT(6)(src), .Ll_exc_copy)
EXC(	LOAD	t7, UNIT(7)(src), .Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
EXC(	STORE	t4, UNIT(4)(dst), .Ls_exc)
EXC(	STORE	t5, UNIT(5)(dst), .Ls_exc)
EXC(	STORE	t6, UNIT(6)(dst), .Ls_exc)
EXC(	STORE	t7, UNIT(7)(dst), .Ls_exc)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	ADD	len, 8*NBYTES		# revert len (see above)
	 * len == the number of bytes left to copy < 8*NBYTES
.Lcleanup_both_aligned:
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	and	rem, len, (NBYTES-1)	# rem = len % NBYTES
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beq	rem, len, .Lcopy_bytes
EXC(	LOAD	t0, 0(src), .Ll_exc)
EXC(	STORE	t0, 0(dst), .Ls_exc)
	.set	reorder				/* DADDI_WAR */
 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 *
 * A loop would do only a byte at a time with possible branch
 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
 * because we can't assume read access to dst.  Instead, use
 * STREST dst, which doesn't require read access to dst.
 *
 * This code should perform better than a simple loop on modern,
 * wide-issue mips processors because the code has fewer branches and
 * more instruction-level parallelism.
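/*
 * The sequence below keeps only the len low-order bytes of the word loaded
 * from src before adding it to the checksum; SHIFT_DISCARD and
 * SHIFT_DISCARD_REVERT do this with an endian-dependent pair of shifts.
 * A hedged, little-endian-flavoured C sketch of that masking step (the
 * function name is illustrative):
 *
 *	static unsigned long keep_low_bytes(unsigned long word, int len)
 *	{
 *		int discard = (sizeof(word) - len) * 8;	// bits to throw away
 *
 *		word <<= discard;	// shift the unwanted high bytes out ...
 *		word >>= discard;	// ... then shift back, zero-filling them
 *		return word;
 *	}
 */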
	ADD	t1, dst, len	# t1 is just past last byte of dst
	SLL	rem, len, 3	# rem = number of bits to keep
EXC(	LOAD	t0, 0(src), .Ll_exc)
	SUB	bits, bits, rem # bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1), .Ls_exc)
	SHIFT_DISCARD_REVERT t0, t0, bits
 * t0 = src & ADDRMASK
 * t1 = dst & ADDRMASK; t1 > 0
 * Copy enough bytes to align dst
 * Set match = (src and dst have same alignment)
EXC(	LDFIRST t3, FIRST(0)(src), .Ll_exc)
EXC(	LDREST	t3, REST(0)(src), .Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
EXC(	STFIRST t3, FIRST(0)(dst), .Ls_exc)
	SLL	t4, t1, 3	# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	beqz	match, .Lboth_aligned
.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
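/*
 * LDFIRST/LDREST together read one full word from an unaligned src address
 * (an LWL/LWR-style pair).  In portable C the same effect is usually had by
 * letting memcpy perform the unaligned read; a hedged sketch (the function
 * name is illustrative):
 *
 *	#include <string.h>
 *
 *	static unsigned long load_unaligned_word(const void *p)
 *	{
 *		unsigned long w;
 *
 *		memcpy(&w, p, sizeof(w));	// compiler emits an unaligned-safe load
 *		return w;
 *	}
 */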
EXC(	LDFIRST t0, FIRST(0)(src), .Ll_exc)
EXC(	LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src), .Ll_exc_copy)
EXC(	LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
EXC(	LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src), .Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src), .Ll_exc_copy)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
.Lcleanup_src_unaligned:
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
EXC(	LDFIRST t0, FIRST(0)(src), .Ll_exc)
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
EXC(	STORE	t0, 0(dst), .Ls_exc)
	.set	reorder				/* DADDI_WAR */
.Lcopy_bytes_checklen:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_START 8*(NBYTES-1)
	move	t2, zero	# partial word
	li	t3, SHIFT_START # shift
	/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)			\
EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
EXC(	sb	t0, N(dst), .Ls_exc);	\
	addu	t3, SHIFT_INC;		\
	beqz	len, .Lcopy_bytes_done; \
EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
#ifdef CONFIG_CPU_MIPSR2
	beqz	odd, 1f			/* odd buffer alignment? */
 * Copy bytes from src until faulting load address (or until a
 * lb faults)
 *
 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
 * may be more than a byte beyond the last address.
 * Hence, the lb below may get an exception.
 *
 * Assumes src < THREAD_BUADDR($28)
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)
EXC(	lbu	t1, 0(src), .Ll_exc)
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
 * Here's where we rely on src and dst being incremented in tandem,
 *   dst += (fault addr - src) to put dst at first byte to clear
	ADD	dst, t0			# compute start address in a1
 * Clear len bytes starting at dst.  Can't call __bzero because it
 * might modify len.  An inefficient loop for these rare times...
	.set	reorder				/* DADDI_WAR */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	li	v0, -1	/* invalid checksum */
	END(__csum_partial_copy_user)