 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Quick'n'dirty IP checksum ...
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
#endif /* USE_DOUBLE */
#define UNIT(unit)  ((unit)*NBYTES)
#define ADDC(sum,reg)						\
#define ADDC32(sum,reg)						\
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
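/*
 * Background note (not from the original source): ADDC() implements a
 * ones'-complement add with end-around carry, and CSUM_BIGCHUNK() unrolls
 * it over 0x20 bytes, as one or two groups of four LOADs depending on the
 * word size.  A minimal C sketch of one ADDC step, assuming 32-bit
 * accumulation:
 *
 *	static inline unsigned int csum_addc(unsigned int sum, unsigned int v)
 *	{
 *		sum += v;
 *		if (sum < v)		// carry out of bit 31 ...
 *			sum += 1;	// ... is wrapped back in
 *		return sum;
 *	}
 */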
 * a1: length of the area to checksum
 * a2: partial checksum
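/*
 * For orientation only (a hedged reference, not part of the original file):
 * the routine computes the 16-bit ones'-complement sum of the a1 bytes at
 * a0, accumulated on top of the partial checksum passed in a2.  A small,
 * little-endian C sketch of the semantics (names are illustrative; the
 * exact 32-bit representative may differ from the assembly's, but both
 * fold to the same 16-bit checksum):
 *
 *	#include <stdint.h>
 *
 *	uint32_t csum_partial_ref(const uint8_t *buf, int len, uint32_t sum)
 *	{
 *		uint64_t acc = sum;
 *		int i;
 *
 *		for (i = 0; i + 1 < len; i += 2)	// 16-bit words, LE lanes
 *			acc += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
 *		if (len & 1)				// trailing odd byte
 *			acc += buf[len - 1];
 *		while (acc >> 32)			// end-around carries
 *			acc = (acc & 0xffffffffu) + (acc >> 32);
 *		return (uint32_t)acc;
 *	}
 */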
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	andi	t7, src, 0x1			/* odd buffer? */
	beqz	t7, .Lword_align
	LONG_SUBU	a1, a1, 0x1
	PTR_ADDU	src, src, 0x1
	beqz	t8, .Ldword_align
	LONG_SUBU	a1, a1, 0x2
	PTR_ADDU	src, src, 0x2
	bnez	t8, .Ldo_end_words
	beqz	t8, .Lqword_align
	LONG_SUBU	a1, a1, 0x4
	PTR_ADDU	src, src, 0x4
	beqz	t8, .Loword_align
	LONG_SUBU	a1, a1, 0x8
	LONG_SUBU	a1, a1, 0x8
	PTR_ADDU	src, src, 0x8
	beqz	t8, .Lbegin_movement
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
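	/*
	 * Each pass through the 128-byte loop below folds 0x80 bytes into
	 * the running sum via four CSUM_BIGCHUNK invocations, with t8
	 * counting down the remaining 128-byte blocks.
	 */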
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40
	beqz	t2, .Ldo_end_words
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x20
	beqz	t8, .Lsmall_csumcpy
	LONG_SUBU	t8, t8, 0x1
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	/* unknown src alignment and < 8 bytes to go */
	/* Still a full word to go */
	dsll	t1, t1, 32		/* clear lower 32 bits */
	/* Still a halfword to go */
	/* odd buffer alignment? */
#ifdef CONFIG_CPU_MIPSR2
	beqz	t7, 1f			/* odd buffer alignment? */
	/* Add the passed partial csum. */
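/*
 * The caller receives a 32-bit (unfolded) ones'-complement sum; collapsing
 * it to the final 16-bit checksum is left to csum_fold().  For reference, a
 * hedged C sketch of that final fold (not part of this file):
 *
 *	static inline uint16_t csum_fold_ref(uint32_t sum)
 *	{
 *		sum = (sum >> 16) + (sum & 0xffff);	// fold high half in
 *		sum += sum >> 16;			// absorb the carry
 *		return (uint16_t)~sum;			// ones'-complement result
 *	}
 */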
 * checksum and copy routines based on memcpy.S
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
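/*
 * A hedged illustration of the resulting contract (the real declarations
 * live in the checksum headers; argument order as documented above:
 * src, dst, len, sum, errp):
 *
 *	int err = 0;
 *	unsigned int csum;
 *
 *	csum = __csum_partial_copy_from_user(usrc, kdst, len, seed, &err);
 *	if (err)	// a handler stored -EFAULT through errp
 *		;	// the copy is incomplete and csum must not be trusted
 */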
/* Instruction type */
#define LEGACY_MODE 1
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * insn    : Load/store instruction
 * type    : Instruction type
 * handler : Exception handler
#define EXC(insn, type, reg, addr, handler)			\
	.if \mode == LEGACY_MODE;				\
	.section __ex_table,"a";				\
	/* This is enabled in EVA mode */			\
	/* If loading from user or storing to user */		\
	.if ((\from == USEROP) && (type == LD_INSN)) ||		\
	    ((\to == USEROP) && (type == ST_INSN));		\
9:	__BUILD_EVA_INSN(insn##e, reg, addr);			\
	.section __ex_table,"a";				\
	/* EVA without exception */				\
#define LOADK	ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define LOADK	lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#define LDFIRST LOADL
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)
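/*
 * Summary of the helpers above: FIRST(u) and REST(u) are the offsets of the
 * first and last byte of unit u (one NBYTES-wide word) and ADDRMASK extracts
 * the misalignment of an address.  A possibly-unaligned unit is accessed
 * with an LDFIRST/LDREST (or STFIRST/STREST) pair, which the endianness
 * block above maps onto lwl/lwr, ldl/ldr, swl/swr or sdl/sdr so that the two
 * partial accesses together cover the whole word.
 */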
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
	PTR_ADDU	AT, src, len	/* See (1) above. */
	/* initialize __nocheck if this is the first time we execute this
FEXPORT(csum_partial_copy_nocheck)
 * Note: dst & src may be unaligned, len may be 0
 * The "issue break"s below are very approximate.
 * Issue delays for dcache fills will perturb the schedule, as will
 * load queue full replay traps, etc.
 * If len < NBYTES use byte operations.
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen\@
	and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned\@
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
 * use delay slot for fall-through
 * src and dst are aligned; need to compute rem
	SRL	t0, len, LOG_NBYTES+3	# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	SUB	len, 8*NBYTES		# subtract here for bgez loop
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	STORE(t4, UNIT(4)(dst), .Ls_exc\@)
	STORE(t5, UNIT(5)(dst), .Ls_exc\@)
	STORE(t6, UNIT(6)(dst), .Ls_exc\@)
	STORE(t7, UNIT(7)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	ADD	len, 8*NBYTES		# revert len (see above)
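	/*
	 * len was biased down by 8*NBYTES before entering the loop so the
	 * exit test could be a simple sign check; the ADD above restores
	 * the true residue (now 0 <= len < 8*NBYTES) for the cleanup below.
	 */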
 * len == the number of bytes left to copy < 8*NBYTES
.Lcleanup_both_aligned\@:
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
.Lless_than_4units\@:
	beq	rem, len, .Lcopy_bytes\@
	LOAD(t0, 0(src), .Ll_exc\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 * A loop would do only a byte at a time with possible branch
 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
 * because we can't assume read access to dst.  Instead, use
 * STREST dst, which doesn't require read access to dst.
 * This code should perform better than a simple loop on modern,
 * wide-issue mips processors because the code has fewer branches and
 * more instruction-level parallelism.
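 *
 * Restated briefly: the word read from src is shifted so that only the
 * len bytes to be written survive (SHIFT_DISCARD), one STREST ending at
 * dst + len - 1 then stores exactly those bytes without ever reading dst,
 * and SHIFT_DISCARD_REVERT moves the kept bytes back to their original
 * lanes so the ADDC into the checksum sees the right value.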
	ADD	t1, dst, len	# t1 is just past last byte of dst
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	SHIFT_DISCARD_REVERT t0, t0, bits
 * t0 = src & ADDRMASK
 * t1 = dst & ADDRMASK; t1 > 0
 * Copy enough bytes to align dst
 * Set match = (src and dst have same alignment)
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	SLL	t4, t1, 3	# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	beq	len, t2, .Ldone\@
	beqz	match, .Lboth_aligned\@
.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned\@
	and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
.Lcleanup_src_unaligned\@:
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
.Lcopy_bytes_checklen\@:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_START 8*(NBYTES-1)
	move	t2, zero	# partial word
	li	t3, SHIFT_START	# shift
	/* use .Ll_exc_copy here to return correct sum on fault */
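	/*
	 * A hedged sketch of the COPY_BYTE() steps below, assuming the
	 * little-endian SHIFT_START/SHIFT_INC values (big-endian walks the
	 * byte lanes in the opposite direction):
	 *
	 *	uint32_t partial = 0;		// t2: bytes packed into lanes
	 *	int shift = 0, n;		// t3: lane shift, step 8
	 *
	 *	for (n = 0; n < len; n++) {	// len < NBYTES here
	 *		uint8_t b = src[n];	// may fault -> .Ll_exc_copy
	 *		dst[n] = b;		// may fault -> .Ls_exc
	 *		partial |= (uint32_t)b << shift;
	 *		shift += 8;
	 *	}
	 *	// the packed partial word is then folded into the running sum
	 */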
#define COPY_BYTE(N)			\
	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
	STOREB(t0, N(dst), .Ls_exc\@);	\
	addu	t3, SHIFT_INC;		\
	beqz	len, .Lcopy_bytes_done\@; \
	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
#ifdef CONFIG_CPU_MIPSR2
	beqz	odd, 1f			/* odd buffer alignment? */
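	/*
	 * If dst started on an odd address the bytes were accumulated in
	 * swapped lanes, so the sum is byte-swapped within each halfword
	 * before the final fold (wsbh on R2, shifts and masks otherwise).
	 * A hedged C equivalent of the fixup:
	 *
	 *	if (odd)
	 *		sum = ((sum & 0x00ff00ff) << 8) | ((sum >> 8) & 0x00ff00ff);
	 */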
 * Copy bytes from src until faulting load address (or until a
 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
 * may be more than a byte beyond the last address.
 * Hence, the lb below may get an exception.
 * Assumes src < THREAD_BUADDR($28)
	LOADK	t0, TI_TASK($28)
	LOADK	t0, THREAD_BUADDR(t0)
	LOADBU(t1, 0(src), .Ll_exc\@)
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	LOADK	t0, TI_TASK($28)
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
 * Here's where we rely on src and dst being incremented in tandem,
 * dst += (fault addr - src) to put dst at first byte to clear
	ADD	dst, t0			# compute start address in a1
 * Clear len bytes starting at dst.  Can't call __bzero because it
 * might modify len.  An inefficient loop for these rare times...
	.set	reorder				/* DADDI_WAR */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	li	v0, -1			/* invalid checksum */
LEAF(__csum_partial_copy_kernel)
FEXPORT(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)

LEAF(__csum_partial_copy_to_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
END(__csum_partial_copy_to_user)

LEAF(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
END(__csum_partial_copy_from_user)
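/*
 * Note on the entry points above: a legacy (non-EVA) build uses a single
 * body for __csum_partial_copy_kernel and aliases the two user entry points
 * to it, while an EVA build instantiates separate to-user and from-user
 * copies so the EXC() wrapper can select the EVA load/store forms for the
 * userspace side of the copy.
 */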