 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Quick'n'dirty IP checksum ...
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
 * As we share the code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
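 *
 * Under o32 the temporaries t0..t7 name registers $8..$15, whereas the n64
 * ABI names $8..$11 a4..a7 and $12..$15 t0..t3, so the redefinitions take a
 * form along the lines of
 *	#define t4	$12
 * letting the shared o32-style code assemble in 64-bit builds.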
#endif /* USE_DOUBLE */

#define UNIT(unit)  ((unit)*NBYTES)
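/*
 * ADDC()/ADDC32() accumulate a register into the running checksum with
 * end-around carry, i.e. roughly "sum += reg; if (sum < reg) sum++;" in C:
 * an unsigned wrap-around on the addition means a carry out of the top bit,
 * which ones' complement arithmetic folds back into the low end.  ADDC32 is
 * the 32-bit form, used where a 32-bit quantity (such as the caller's
 * partial checksum) has to be folded in even in USE_DOUBLE builds.
 */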
#define ADDC(sum,reg) \
#define ADDC32(sum,reg) \
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
	LOAD _t0, (offset + UNIT(0))(src); \
	LOAD _t1, (offset + UNIT(1))(src); \
	LOAD _t2, (offset + UNIT(2))(src); \
	LOAD _t3, (offset + UNIT(3))(src); \
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
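/*
 * Either way a CSUM_BIGCHUNK covers 0x20 bytes of input: four double-word
 * loads with USE_DOUBLE, or two groups of four word loads without it, so
 * the unrolled loops below can step their offsets by 0x20 (and 0x80 per
 * iteration) regardless of the register width.
 */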
 * a1: length of the area to checksum
 * a2: partial checksum
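 *
 * (C prototype for reference: __wsum csum_partial(const void *buff, int len,
 *  __wsum sum); the buffer address arrives in a0.)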
	bnez t8, .Lsmall_csumcpy	/* < 8 bytes to copy */
	andi t7, src, 0x1		/* odd buffer? */
	beqz t7, .Lword_align
	LONG_SUBU a1, a1, 0x1
	PTR_ADDU src, src, 0x1
	beqz t8, .Ldword_align
	LONG_SUBU a1, a1, 0x2
	PTR_ADDU src, src, 0x2
	bnez t8, .Ldo_end_words
	beqz t8, .Lqword_align
	LONG_SUBU a1, a1, 0x4
	PTR_ADDU src, src, 0x4
	beqz t8, .Loword_align
	LONG_SUBU a1, a1, 0x8
	LONG_SUBU a1, a1, 0x8
	PTR_ADDU src, src, 0x8
	beqz t8, .Lbegin_movement
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
	LONG_SUBU a1, a1, 0x10
	PTR_ADDU src, src, 0x10
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU t8, t8, 0x01
	.set reorder			/* DADDI_WAR */
	PTR_ADDU src, src, 0x80
	bnez t8, .Lmove_128bytes
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU src, src, 0x40
	beqz t2, .Ldo_end_words
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	PTR_ADDU src, src, 0x20
	beqz t8, .Lsmall_csumcpy
	LONG_SUBU t8, t8, 0x1
	.set reorder			/* DADDI_WAR */
	PTR_ADDU src, src, 0x4
	/* unknown src alignment and < 8 bytes to go */
	/* Still a full word to go */
	dsll t1, t1, 32			/* clear lower 32bit */
	/* Still a halfword to go */
	/* odd buffer alignment? */
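	/*
	 * If the buffer started on an odd address (the low address bit was
	 * saved in t7 above), every byte has been accumulated in the
	 * opposite lane of its 16-bit word, so the result must be
	 * byte-swapped within each halfword: the CONFIG_CPU_MIPSR2 path can
	 * use a single swap instruction (wsbh), while the generic path does
	 * it with shifts and masks.
	 */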
#ifdef CONFIG_CPU_MIPSR2
	beqz t7, 1f			/* odd buffer alignment? */
	/* Add the passed partial csum. */
 * checksum and copy routines based on memcpy.S
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 * The exception handler for loads requires that:
 *  1- AT contains the address of the byte just past the end of the source
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
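 *
 * As a C-level reference model (a sketch only: the helper name, the use of
 * memcpy() and the big-endian 16-bit word convention are illustrative
 * assumptions, not how the assembly below is organised), the success path
 * of these routines computes:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	// Copy len bytes from src to dst and return the 32-bit ones'
 *	// complement sum of the copied data added to the caller-supplied
 *	// partial checksum.
 *	static uint32_t csum_and_copy_sketch(void *dst, const void *src,
 *					     int len, uint32_t partial)
 *	{
 *		const uint8_t *p = dst;
 *		uint64_t acc = partial;
 *		int i;
 *
 *		memcpy(dst, src, len);
 *		for (i = 0; i + 1 < len; i += 2)	// 16-bit words
 *			acc += (uint32_t)((p[i] << 8) | p[i + 1]);
 *		if (len & 1)				// odd trailing byte
 *			acc += (uint32_t)p[len - 1] << 8;
 *		while (acc >> 32)			// end-around carry
 *			acc = (acc & 0xffffffffu) + (acc >> 32);
 *		return (uint32_t)acc;
 *	}
 *
 * On a fault the routines report the failure through errptr; the handler
 * for a faulting user read additionally zeroes the part of dst that was
 * not copied, as implemented further below.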
/* Instruction type */
#define LEGACY_MODE 1
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * insn    : Load/store instruction
 * type    : Instruction type
 * handler : Exception handler
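 *
 * In LEGACY_MODE the wrapper expands to the instruction at a local label
 * plus an __ex_table entry mapping that label to the handler; in EVA mode
 * only user-space accesses (USEROP loads and USEROP stores) are rewritten
 * to their EVA forms (insn##e) and given exception-table coverage, while
 * kernel-side accesses are emitted as ordinary instructions.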
#define EXC(insn, type, reg, addr, handler) \
	.if \mode == LEGACY_MODE; \
	.section __ex_table,"a"; \
	/* This is enabled in EVA mode */ \
	/* If loading from user or storing to user */ \
	.if ((\from == USEROP) && (type == LD_INSN)) || \
	    ((\to == USEROP) && (type == ST_INSN)); \
9:	__BUILD_EVA_INSN(insn##e, reg, addr); \
	.section __ex_table,"a"; \
	/* EVA without exception */ \
#define LOADK	ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define LOADK	lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST	LOADR
#define STFIRST	STORER
#define STREST	STOREL
#define SHIFT_DISCARD	SLLV
#define SHIFT_DISCARD_REVERT	SRLV
#define LDFIRST	LOADL
#define STFIRST	STOREL
#define STREST	STORER
#define SHIFT_DISCARD	SRLV
#define SHIFT_DISCARD_REVERT	SLLV
#define FIRST(unit)	((unit)*NBYTES)
#define REST(unit)	(FIRST(unit)+NBYTES-1)
#define ADDRMASK	(NBYTES-1)
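/*
 * FIRST(unit)/REST(unit) are the two addresses handed to an unaligned
 * load/store pair (LDFIRST/LDREST, STFIRST/STREST); which member of the
 * pair is the "left" and which the "right" instruction depends on the
 * endianness selected above.  SHIFT_DISCARD shifts a partially used unit so
 * that the bytes which are not stored are dropped, both for the partial
 * store itself and so that only the copied bytes contribute to the
 * checksum; SHIFT_DISCARD_REVERT undoes the shift where the original byte
 * lanes are needed again.
 */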
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
	PTR_ADDU AT, src, len		/* See (1) above. */
	/* initialize __nocheck if this is the first time we execute this
FEXPORT(csum_partial_copy_nocheck)
 * Note: dst & src may be unaligned, len may be 0
 * The "issue break"s below are very approximate.
 * Issue delays for dcache fills will perturb the schedule, as will
 * load queue full replay traps, etc.
 * If len < NBYTES use byte operations.
	and t1, dst, ADDRMASK
	bnez t2, .Lcopy_bytes_checklen\@
	and t0, src, ADDRMASK
	andi odd, dst, 0x1		/* odd buffer? */
	bnez t1, .Ldst_unaligned\@
	bnez t0, .Lsrc_unaligned_dst_aligned\@
 * use delay slot for fall-through
 * src and dst are aligned; need to compute rem
	SRL t0, len, LOG_NBYTES+3	# +3 for 8 units/iter
	beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
	SUB len, 8*NBYTES		# subtract here for bgez loop
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
	SUB len, len, 8*NBYTES
	ADD src, src, 8*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	STORE(t4, UNIT(4)(dst), .Ls_exc\@)
	STORE(t5, UNIT(5)(dst), .Ls_exc\@)
	STORE(t6, UNIT(6)(dst), .Ls_exc\@)
	STORE(t7, UNIT(7)(dst), .Ls_exc\@)
	.set reorder			/* DADDI_WAR */
	ADD dst, dst, 8*NBYTES
	ADD len, 8*NBYTES		# revert len (see above)
 * len == the number of bytes left to copy < 8*NBYTES
.Lcleanup_both_aligned\@:
	sltu t0, len, 4*NBYTES
	bnez t0, .Lless_than_4units\@
	and rem, len, (NBYTES-1)	# rem = len % NBYTES
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB len, len, 4*NBYTES
	ADD src, src, 4*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set reorder			/* DADDI_WAR */
	ADD dst, dst, 4*NBYTES
.Lless_than_4units\@:
	beq rem, len, .Lcopy_bytes\@
	LOAD(t0, 0(src), .Ll_exc\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set reorder			/* DADDI_WAR */
 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 * A loop would do only a byte at a time with possible branch
 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
 * because can't assume read-access to dst.  Instead, use
 * STREST dst, which doesn't require read access to dst.
 * This code should perform better than a simple loop on modern,
 * wide-issue mips processors because the code has fewer branches and
 * more instruction-level parallelism.
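 *
 * Concretely: one full unit is loaded from src, the NBYTES-len bytes that
 * must not be stored are shifted out (SHIFT_DISCARD), the remaining bytes
 * are written with a single partial store ending at the last byte of dst
 * (STREST), and the shift is then undone (SHIFT_DISCARD_REVERT) so that the
 * kept bytes sit in their original lanes when they are added into the
 * running checksum.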
	ADD t1, dst, len		# t1 is just past last byte of dst
	SLL rem, len, 3			# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB bits, bits, rem		# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	SHIFT_DISCARD_REVERT t0, t0, bits
 * t0 = src & ADDRMASK
 * t1 = dst & ADDRMASK; t1 > 0
 * Copy enough bytes to align dst
 * Set match = (src and dst have same alignment)
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB t2, t2, t1			# t2 = number of bytes copied
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	SLL t4, t1, 3			# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	beq len, t2, .Ldone\@
	beqz match, .Lboth_aligned\@
.Lsrc_unaligned_dst_aligned\@:
	SRL t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz t0, .Lcleanup_src_unaligned\@
	and rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set reorder			/* DADDI_WAR */
	ADD dst, dst, 4*NBYTES
.Lcleanup_src_unaligned\@:
	and rem, len, NBYTES-1		# rem = len % NBYTES
	beq rem, len, .Lcopy_bytes\@
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set reorder			/* DADDI_WAR */
.Lcopy_bytes_checklen\@:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_START 8*(NBYTES-1)
	move t2, zero			# partial word
	li t3, SHIFT_START		# shift
	/* use .Ll_exc_copy here to return correct sum on fault */
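/*
 * COPY_BYTE(N) copies one byte and, besides storing it, shifts it into its
 * lane of the partial word in t2 (the shift starts at SHIFT_START and
 * advances by SHIFT_INC per byte), so that the whole tail can be folded
 * into the checksum in one go once the last byte has been copied.
 */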
#define COPY_BYTE(N) \
	LOADBU(t0, N(src), .Ll_exc_copy\@); \
	STOREB(t0, N(dst), .Ls_exc\@); \
	addu t3, SHIFT_INC; \
	beqz len, .Lcopy_bytes_done\@; \
	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
#ifdef CONFIG_CPU_MIPSR2
	beqz odd, 1f			/* odd buffer alignment? */
 * Copy bytes from src until faulting load address (or until a
 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
 * may be more than a byte beyond the last address.
 * Hence, the lb below may get an exception.
 * Assumes src < THREAD_BUADDR($28)
	LOADK t0, TI_TASK($28)
	LOADK t0, THREAD_BUADDR(t0)
	LOADBU(t1, 0(src), .Ll_exc\@)
	sb t1, 0(dst)			# can't fault -- we're copy_from_user
	.set reorder			/* DADDI_WAR */
	LOADK t0, TI_TASK($28)
	LOADK t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB len, AT, t0			# len number of uncopied bytes
 * Here's where we rely on src and dst being incremented in tandem,
 * dst += (fault addr - src) to put dst at first byte to clear
	ADD dst, t0			# compute start address in a1
 * Clear len bytes starting at dst.  Can't call __bzero because it
 * might modify len.  An inefficient loop for these rare times...
	.set reorder			/* DADDI_WAR */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	li v0, -1			/* invalid checksum */
LEAF(__csum_partial_copy_kernel)
FEXPORT(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)
LEAF(__csum_partial_copy_to_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
END(__csum_partial_copy_from_user)
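/*
 * In the instantiations above, the first macro argument selects whether
 * EXC() emits standard (LEGACY_MODE) or EVA (EVA_MODE) load/store forms,
 * the USEROP/KERNELOP pair says whether the source resp. destination may be
 * a user-space address and therefore needs exception-table coverage, and
 * the final flag appears to control whether the csum_partial_copy_nocheck
 * entry point is exported from the same body.
 */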