/* SPDX-License-Identifier: GPL-2.0+
 *
 * $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *		handling.
 *		Andi Kleen, add zeroing on error
 *		converted to pure assembler
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <asm/errno.h>
#include <linux/linkage.h>
/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 *
 * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
 */
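/*
 * For reference only (nothing here is assembled): a minimal C sketch of the
 * accumulation the routine below performs, assuming a 4-byte-aligned buffer.
 * The names csum32_add and csum_partial_ref are illustrative, not kernel
 * APIs, and the sketch ignores the byte-lane placement (shll8/shll16) that
 * the assembly applies so the folded 16-bit result comes out in the right
 * byte order.
 *
 *	static unsigned int csum32_add(unsigned int sum, unsigned int w)
 *	{
 *		sum += w;
 *		return sum + (sum < w);		// end-around carry, like addc
 *	}
 *
 *	static unsigned int csum_partial_ref(const void *buf, int len,
 *					     unsigned int sum)
 *	{
 *		const unsigned int *p = buf;
 *
 *		// The assembly unrolls this loop 8 words (32 bytes) at a time.
 *		while (len >= 4) {
 *			sum = csum32_add(sum, *p++);
 *			len -= 4;
 *		}
 *		if (len >= 2) {			// trailing halfword
 *			sum = csum32_add(sum, *(const unsigned short *)p);
 *			p = (const unsigned int *)((const char *)p + 2);
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte
 *			sum = csum32_add(sum, *(const unsigned char *)p);
 *		return sum;	// still 32 bits wide; callers fold it later
 *	}
 */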
ENTRY(csum_partial)
	/*
	 * Experiments with Ethernet and SLIP connections show that buf
	 * is aligned on either a 2-byte or 4-byte boundary.  We get at
	 * least a twofold speedup on 486 and Pentium if it is 4-byte
	 * aligned.  Fortunately, it is easy to convert 2-byte alignment
	 * to 4-byte alignment for the unrolled loop.
	 */
	tst	#3, r0		! Check alignment.
	bt/s	2f		! Jump if alignment is ok.
	 mov	r4, r7		! Keep a copy to check for alignment
	tst	#1, r0		! Check alignment.
	bt	21f		! Jump if aligned to a 2-byte boundary.
	addc	r0, r6		! T = 0 from the previous tst
	! buf is 2-byte aligned (len could be 0)
	add	#-2, r5		! Alignment uses up two bytes.
	bt/s	1f		! Jump if we had at least two bytes.
	add	#2, r5		! r5 was < 2.  Deal with it.
	! buf is 4-byte aligned (len could be 0)
	bt/s	4f		! If the count is zero, go to 4f
	! here, we know r1 == 0
	addc	r1, r6		! add carry to r6
	! 4 bytes or more remaining
	addc	r1, r6		! r1 == 0 here, so this just adds the carry bit
	! 3 bytes or fewer remaining
	bt	9f		! If the count is zero, go to 9f
#ifndef	__LITTLE_ENDIAN__
	! Check if the buffer was misaligned; if so, realign the sum.
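	! (If the buffer started on an odd address, the bytes were summed in
	! swapped lanes; rotating the 32-bit sum by 8 bits,
	! r6 = (r6 << 8) | (r6 >> 24), restores the byte order of the folded
	! 16-bit result.)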
/*
 * unsigned int csum_partial_copy_generic(const char *src, char *dst, int len)
 *
 * Copy from src while checksumming; otherwise like csum_partial, with the
 * initial sum being ~0U.
 */
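/*
 * For reference only (nothing here is assembled): a C sketch of the aligned
 * fast path, assuming src and dst are both 4-byte aligned and len is a
 * multiple of 4.  The name csum_and_copy_ref is illustrative; the real
 * routine also handles misalignment, odd lengths, and faulting accesses
 * (via the EXC()/__ex_table machinery defined next).
 *
 *	static unsigned int csum_and_copy_ref(const unsigned int *src,
 *					      unsigned int *dst, int len)
 *	{
 *		unsigned int sum = ~0U;		// initial sum, as noted above
 *
 *		while (len >= 4) {
 *			unsigned int w = *src++;
 *
 *			*dst++ = w;		// copy the word
 *			sum += w;
 *			if (sum < w)		// end-around carry, like addc
 *				sum++;
 *			len -= 4;
 *		}
 *		return sum;
 *	}
 */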
#define EXC(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6001f	;	\
	.previous
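/*
 * Each memory access wrapped in EXC() gets an __ex_table entry pairing the
 * access's address (the local label 9999) with the fixup code at label 6001
 * in the .fixup section at the end of this file; if the access faults, the
 * exception handler resumes execution there instead of treating the fault
 * as fatal.
 */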
! r4:	const char *SRC
ENTRY(csum_partial_copy_generic)
	mov	#3,r0		! Check src and dest are equally aligned
	bf	3f		! Different alignments, use slow version
	tst	#1,r0		! Check dest word aligned
	bf	3f		! If not, do it the slow way
	tst	r0,r5		! Check dest alignment.
	bt	2f		! Jump if alignment is ok.
	add	#-2,r6		! Alignment uses up two bytes.
	cmp/pz	r6		! Did we have at least two bytes?
	add	#2,r6		! r6 was < 2.  Deal with it.
3:	! Handle different src and dest alignments.
	! This is not common, so a simple byte-by-byte copy will do.
EXC(	mov.b	r0,@(1,r5)	)
#ifdef	__LITTLE_ENDIAN__
	! src and dest equally aligned, but to a two-byte boundary.
	! Handle the first two bytes as a special case.
EXC(	mov.l	r1,@(4,r5)	)
EXC(	mov.l	r0,@(8,r5)	)
EXC(	mov.l	r1,@(12,r5)	)
EXC(	mov.l	r0,@(16,r5)	)
EXC(	mov.l	r1,@(20,r5)	)
EXC(	mov.l	r0,@(24,r5)	)
EXC(	mov.l	r1,@(28,r5)	)
#ifndef	__LITTLE_ENDIAN__

	.section .fixup, "ax"