2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Quick'n'dirty IP checksum ...
8 * Copyright (C) 1998, 1999 Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2007 Maciej W. Rozycki
11 * Copyright (C) 2014 Imagination Technologies Ltd.
13 #include <linux/errno.h>
15 #include <asm/asm-offsets.h>
16 #include <asm/export.h>
17 #include <asm/regdef.h>
21 * As we are sharing code base with the mips32 tree (which uses the o32 ABI
22 * register definitions), we need to redefine the register definitions from
23 * the n64 ABI register naming to the o32 ABI register naming.
55 #endif /* USE_DOUBLE */
57 #define UNIT(unit) ((unit)*NBYTES)
59 #define ADDC(sum,reg) \
67 #define ADDC32(sum,reg) \
75 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
76 LOAD _t0, (offset + UNIT(0))(src); \
77 LOAD _t1, (offset + UNIT(1))(src); \
78 LOAD _t2, (offset + UNIT(2))(src); \
79 LOAD _t3, (offset + UNIT(3))(src); \
86 #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
87 CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
89 #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
90 CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
91 CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
96 * a1: length of the area to checksum
97 * a2: partial checksum
107 EXPORT_SYMBOL(csum_partial)
112 bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */
115 andi t7, src, 0x1 /* odd buffer? */
118 beqz t7, .Lword_align
122 LONG_SUBU a1, a1, 0x1
127 PTR_ADDU src, src, 0x1
131 beqz t8, .Ldword_align
135 LONG_SUBU a1, a1, 0x2
138 PTR_ADDU src, src, 0x2
141 bnez t8, .Ldo_end_words
145 beqz t8, .Lqword_align
149 LONG_SUBU a1, a1, 0x4
151 PTR_ADDU src, src, 0x4
155 beqz t8, .Loword_align
160 LONG_SUBU a1, a1, 0x8
165 LONG_SUBU a1, a1, 0x8
169 PTR_ADDU src, src, 0x8
173 beqz t8, .Lbegin_movement
182 CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
184 LONG_SUBU a1, a1, 0x10
185 PTR_ADDU src, src, 0x10
193 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
194 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
195 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
196 CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
197 LONG_SUBU t8, t8, 0x01
198 .set reorder /* DADDI_WAR */
199 PTR_ADDU src, src, 0x80
200 bnez t8, .Lmove_128bytes
208 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
209 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
210 PTR_ADDU src, src, 0x40
213 beqz t2, .Ldo_end_words
217 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
219 PTR_ADDU src, src, 0x20
222 beqz t8, .Lsmall_csumcpy
228 LONG_SUBU t8, t8, 0x1
230 .set reorder /* DADDI_WAR */
231 PTR_ADDU src, src, 0x4
235 /* unknown src alignment and < 8 bytes to go */
243 /* Still a full word to go */
247 dsll t1, t1, 32 /* clear lower 32bit */
255 /* Still a halfword to go */
281 /* odd buffer alignment? */
282 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
289 beqz t7, 1f /* odd buffer alignment? */
300 /* Add the passed partial csum. */
308 * checksum and copy routines based on memcpy.S
310 * csum_partial_copy_nocheck(src, dst, len, sum)
311 * __csum_partial_copy_kernel(src, dst, len, sum, errp)
313 * See "Spec" in memcpy.S for details. Unlike __copy_user, all
314 * functions in this file use the standard calling convention.
326 * The exception handler for loads requires that:
327 * 1- AT contain the address of the byte just past the end of the source
329 * 2- src_entry <= src < AT, and
330 * 3- (dst - src) == (dst_entry - src_entry),
331 * The _entry suffix denotes values when __copy_user was called.
333 * (1) is set up by __csum_partial_copy_from_user and maintained by
334 * not writing AT in __csum_partial_copy
335 * (2) is met by incrementing src by the number of bytes copied
336 * (3) is met by not doing loads between a pair of increments of dst and src
338 * The exception handlers for stores store -EFAULT to errptr and return.
339 * These handlers do not need to overwrite any data.
342 /* Instruction type */
345 #define LEGACY_MODE 1
351 * Wrapper to add an entry in the exception table
352 * in case the insn causes a memory exception.
354 * insn : Load/store instruction
355 * type : Instruction type
358 * handler : Exception handler
360 #define EXC(insn, type, reg, addr, handler) \
361 .if \mode == LEGACY_MODE; \
363 .section __ex_table,"a"; \
366 /* This is enabled in EVA mode */ \
368 /* If loading from user or storing to user */ \
369 .if ((\from == USEROP) && (type == LD_INSN)) || \
370 ((\to == USEROP) && (type == ST_INSN)); \
371 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
372 .section __ex_table,"a"; \
376 /* EVA without exception */ \
385 #define LOADK ld /* No exception */
386 #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
387 #define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
388 #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
389 #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
390 #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
391 #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
392 #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
393 #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
405 #define LOADK lw /* No exception */
406 #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
407 #define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
408 #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
409 #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
410 #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
411 #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
412 #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
413 #define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
423 #endif /* USE_DOUBLE */
425 #ifdef CONFIG_CPU_LITTLE_ENDIAN
426 #define LDFIRST LOADR
428 #define STFIRST STORER
429 #define STREST STOREL
430 #define SHIFT_DISCARD SLLV
431 #define SHIFT_DISCARD_REVERT SRLV
433 #define LDFIRST LOADL
435 #define STFIRST STOREL
436 #define STREST STORER
437 #define SHIFT_DISCARD SRLV
438 #define SHIFT_DISCARD_REVERT SLLV
441 #define FIRST(unit) ((unit)*NBYTES)
442 #define REST(unit) (FIRST(unit)+NBYTES-1)
444 #define ADDRMASK (NBYTES-1)
446 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
452 .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
454 PTR_ADDU AT, src, len /* See (1) above. */
455 /* initialize __nocheck if this is the first time we execute this
464 FEXPORT(csum_partial_copy_nocheck)
465 EXPORT_SYMBOL(csum_partial_copy_nocheck)
470 * Note: dst & src may be unaligned, len may be 0
474 * The "issue break"s below are very approximate.
475 * Issue delays for dcache fills will perturb the schedule, as will
476 * load queue full replay traps, etc.
478 * If len < NBYTES use byte operations.
481 and t1, dst, ADDRMASK
482 bnez t2, .Lcopy_bytes_checklen\@
483 and t0, src, ADDRMASK
484 andi odd, dst, 0x1 /* odd buffer? */
485 bnez t1, .Ldst_unaligned\@
487 bnez t0, .Lsrc_unaligned_dst_aligned\@
489 * use delay slot for fall-through
490 * src and dst are aligned; need to compute rem
493 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
494 beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
496 SUB len, 8*NBYTES # subtract here for bgez loop
499 LOAD(t0, UNIT(0)(src), .Ll_exc\@)
500 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
501 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
502 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
503 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
504 LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
505 LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
506 LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
507 SUB len, len, 8*NBYTES
508 ADD src, src, 8*NBYTES
509 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
511 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
513 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
515 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
517 STORE(t4, UNIT(4)(dst), .Ls_exc\@)
519 STORE(t5, UNIT(5)(dst), .Ls_exc\@)
521 STORE(t6, UNIT(6)(dst), .Ls_exc\@)
523 STORE(t7, UNIT(7)(dst), .Ls_exc\@)
525 .set reorder /* DADDI_WAR */
526 ADD dst, dst, 8*NBYTES
529 ADD len, 8*NBYTES # revert len (see above)
532 * len == the number of bytes left to copy < 8*NBYTES
534 .Lcleanup_both_aligned\@:
537 sltu t0, len, 4*NBYTES
538 bnez t0, .Lless_than_4units\@
539 and rem, len, (NBYTES-1) # rem = len % NBYTES
543 LOAD(t0, UNIT(0)(src), .Ll_exc\@)
544 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
545 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
546 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
547 SUB len, len, 4*NBYTES
548 ADD src, src, 4*NBYTES
549 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
551 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
553 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
555 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
557 .set reorder /* DADDI_WAR */
558 ADD dst, dst, 4*NBYTES
561 .Lless_than_4units\@:
565 beq rem, len, .Lcopy_bytes\@
568 LOAD(t0, 0(src), .Ll_exc\@)
571 STORE(t0, 0(dst), .Ls_exc\@)
573 .set reorder /* DADDI_WAR */
579 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
580 * A loop would do only a byte at a time with possible branch
581 * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
582 * because can't assume read-access to dst. Instead, use
583 * STREST dst, which doesn't require read access to dst.
585 * This code should perform better than a simple loop on modern,
586 * wide-issue mips processors because the code has fewer branches and
587 * more instruction-level parallelism.
591 ADD t1, dst, len # t1 is just past last byte of dst
593 SLL rem, len, 3 # rem = number of bits to keep
594 LOAD(t0, 0(src), .Ll_exc\@)
595 SUB bits, bits, rem # bits = number of bits to discard
596 SHIFT_DISCARD t0, t0, bits
597 STREST(t0, -1(t1), .Ls_exc\@)
598 SHIFT_DISCARD_REVERT t0, t0, bits
606 * t0 = src & ADDRMASK
607 * t1 = dst & ADDRMASK; t1 > 0
610 * Copy enough bytes to align dst
611 * Set match = (src and dst have same alignment)
614 LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
616 LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
617 SUB t2, t2, t1 # t2 = number of bytes copied
619 STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
620 SLL t4, t1, 3 # t4 = number of bits to discard
621 SHIFT_DISCARD t3, t3, t4
622 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
624 beq len, t2, .Ldone\@
627 beqz match, .Lboth_aligned\@
630 .Lsrc_unaligned_dst_aligned\@:
631 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
632 beqz t0, .Lcleanup_src_unaligned\@
633 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
636 * Avoid consecutive LD*'s to the same register since some mips
637 * implementations can't issue them in the same cycle.
638 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
639 * are to the same unit (unless src is aligned, but it's not).
641 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
642 LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
643 SUB len, len, 4*NBYTES
644 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
645 LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
646 LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
647 LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
648 LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
649 LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
650 ADD src, src, 4*NBYTES
651 #ifdef CONFIG_CPU_SB1
652 nop # improves slotting
654 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
656 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
658 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
660 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
662 .set reorder /* DADDI_WAR */
663 ADD dst, dst, 4*NBYTES
667 .Lcleanup_src_unaligned\@:
669 and rem, len, NBYTES-1 # rem = len % NBYTES
670 beq rem, len, .Lcopy_bytes\@
673 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
674 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
677 STORE(t0, 0(dst), .Ls_exc\@)
679 .set reorder /* DADDI_WAR */
684 .Lcopy_bytes_checklen\@:
688 /* 0 < len < NBYTES */
689 #ifdef CONFIG_CPU_LITTLE_ENDIAN
690 #define SHIFT_START 0
693 #define SHIFT_START 8*(NBYTES-1)
696 move t2, zero # partial word
697 li t3, SHIFT_START # shift
698 /* use .Ll_exc_copy here to return correct sum on fault */
699 #define COPY_BYTE(N) \
700 LOADBU(t0, N(src), .Ll_exc_copy\@); \
702 STOREB(t0, N(dst), .Ls_exc\@); \
704 addu t3, SHIFT_INC; \
705 beqz len, .Lcopy_bytes_done\@; \
716 LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
718 STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
735 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
742 beqz odd, 1f /* odd buffer alignment? */
760 * Copy bytes from src until faulting load address (or until a
763 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
764 * may be more than a byte beyond the last address.
765 * Hence, the lb below may get an exception.
767 * Assumes src < THREAD_BUADDR($28)
769 LOADK t0, TI_TASK($28)
771 LOADK t0, THREAD_BUADDR(t0)
773 LOADBU(t1, 0(src), .Ll_exc\@)
775 sb t1, 0(dst) # can't fault -- we're copy_from_user
779 .set reorder /* DADDI_WAR */
784 LOADK t0, TI_TASK($28)
786 LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
788 SUB len, AT, t0 # len number of uncopied bytes
790 * Here's where we rely on src and dst being incremented in tandem,
792 * dst += (fault addr - src) to put dst at first byte to clear
794 ADD dst, t0 # compute start address in a1
797 * Clear len bytes starting at dst. Can't call __bzero because it
798 * might modify len. An inefficient loop for these rare times...
800 .set reorder /* DADDI_WAR */
808 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
821 li v0, -1 /* invalid checksum */
828 LEAF(__csum_partial_copy_kernel)
829 EXPORT_SYMBOL(__csum_partial_copy_kernel)
831 FEXPORT(__csum_partial_copy_to_user)
832 EXPORT_SYMBOL(__csum_partial_copy_to_user)
833 FEXPORT(__csum_partial_copy_from_user)
834 EXPORT_SYMBOL(__csum_partial_copy_from_user)
836 __BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
837 END(__csum_partial_copy_kernel)
840 LEAF(__csum_partial_copy_to_user)
841 __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
842 END(__csum_partial_copy_to_user)
844 LEAF(__csum_partial_copy_from_user)
845 __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
846 END(__csum_partial_copy_from_user)