// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 * 1998/8/31	Andreas Schwab:
 *		Zero out rest of buffer on exception in
 *		csum_partial_copy_from_user.
 */
#include <linux/module.h>
#include <net/checksum.h>
/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	unsigned long tmp1, tmp2;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	__asm__("movel %2,%3\n\t"
		"btst #1,%3\n\t"	/* Check alignment */
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"addw %2@+,%0\n\t"	/* add first word to sum */
		"addxl %3,%0\n"		/* add X bit */
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%3\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"addxl %4,%0\n\t"	/* add X bit */
		"movel %3,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%3\n\t"	/* number of rest longs */
		/* loop for rest longs */
		"addxl %4,%0\n"		/* add X bit */
		/* now check for rest bytes that do not fit into longs */
		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
		"swap %4\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
		"addl %4,%0\n\t"	/* now add rest long to sum */
		"addxl %4,%0\n"		/* add X bit */
		: "=d" (sum), "=d" (len), "=a" (buff),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (buff)
		);
	return sum;
}

EXPORT_SYMBOL(csum_partial);
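
/*
 * Reference sketch (not compiled): a rough portable C rendering of the
 * ones'-complement accumulation csum_partial() performs above, added
 * purely as illustration.  The helper name csum_partial_ref() is
 * hypothetical and not part of the kernel API; it replaces the 32-bit
 * unrolled adds and X-bit handling with a 64-bit accumulator whose
 * carries are folded at the end, which produces a value that folds to
 * the same 16-bit checksum for the aligned buffers this file assumes.
 */
#if 0
static unsigned int csum_partial_ref(const unsigned char *buff, int len,
				     unsigned int sum)
{
	unsigned long long acc = sum;

	/* add the buffer as native-endian 16-bit words (buff is assumed
	   to be at least 2-byte aligned, as the comment above notes) */
	while (len >= 2) {
		acc += *(const unsigned short *)buff;
		buff += 2;
		len -= 2;
	}
	/* a trailing odd byte counts as the high byte of a final 16-bit
	   word on this big-endian machine, matching the lslw #8 above */
	if (len)
		acc += (unsigned long long)*buff << 8;
	/* fold the carries back into the low 32 bits, emulating the
	   addx-based end-around carry of the asm */
	while (acc >> 32)
		acc = (acc & 0xffffffffULL) + (acc >> 32);
	return (unsigned int)acc;
}
#endif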
/*
 * copy from user space while checksumming, with exception handling.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum sum, int *csum_err)
{
	/*
	 * GCC doesn't like more than 10 operands for the asm
	 * statements so we have to use tmp2 for the error
	 * case.
	 */
	unsigned long tmp1, tmp2;
	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"movesw %2@+,%4\n\t"	/* add first word to sum */
		"addxl %4,%0\n"		/* add X bit */
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"addxl %5,%0\n\t"	/* add X bit */
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		/* loop for rest longs */
		"addxl %5,%0\n"		/* add X bit */
		/* now check for rest bytes that do not fit into longs */
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"addxl %5,%0\n\t"	/* add X bit */
		"clrl %5\n"		/* no error - clear return value */
241 ".section .fixup,\"ax\"\n"
243 /* If any exception occurs zero out the rest.
244 Similarities with the code above are intentional :-) */
290 #define STR(X) STR1(X)
292 "moveq #-" STR(EFAULT
) ",%5\n\t"
295 ".section __ex_table,\"a\"\n"
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);

	*csum_err = tmp2;

	return sum;
}

EXPORT_SYMBOL(csum_partial_copy_from_user);
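
/*
 * Reference sketch (not compiled): the semantics the asm above fuses
 * into a single pass, written with generic kernel primitives instead of
 * movesw/movesb plus a .fixup/__ex_table pair.  The helper name
 * csum_copy_from_user_ref() is hypothetical and would additionally need
 * <linux/uaccess.h> and <linux/string.h>.  On a fault it zeroes the
 * rest of the destination (the 1998/8/31 change noted in the header)
 * and reports -EFAULT through *csum_err, as the fixup code does via
 * tmp2.
 */
#if 0
static __wsum csum_copy_from_user_ref(const void __user *src, void *dst,
				      int len, __wsum sum, int *csum_err)
{
	/* copy_from_user() returns the number of bytes NOT copied */
	int left = copy_from_user(dst, src, len);

	if (left) {
		/* zero out the rest of the destination buffer */
		memset((char *)dst + len - left, 0, left);
		*csum_err = -EFAULT;
	} else {
		*csum_err = 0;
	}
	/* checksumming the zero padding does not change the folded sum */
	return csum_partial(dst, len, sum);
}
#endif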
/*
 * copy from kernel space while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	unsigned long tmp1, tmp2;
	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"movew %2@+,%4\n\t"	/* add first word to sum */
		"addxl %4,%0\n"		/* add X bit */
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"addxl %5,%0\n\t"	/* add X bit */
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		/* loop for rest longs */
		"addxl %5,%0\n"		/* add X bit */
		/* now check for rest bytes that do not fit into longs */
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"addxl %5,%0\n"		/* add X bit */
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);
	return sum;
}

EXPORT_SYMBOL(csum_partial_copy_nocheck);
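
/*
 * Reference sketch (not compiled): csum_partial_copy_nocheck() is just
 * "copy, then checksum" with the two passes fused for speed.  The naive
 * equivalent below uses a hypothetical helper name and would need
 * <linux/string.h> for memcpy().
 */
#if 0
static __wsum csum_copy_nocheck_ref(const void *src, void *dst,
				    int len, __wsum sum)
{
	memcpy(dst, src, len);			/* plain kernel-to-kernel copy */
	return csum_partial(dst, len, sum);	/* checksum what was copied */
}
#endif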