/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *			     handling.
 *		Andi Kleen,  add zeroing on error
 *			     converted to pure assembler
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 */
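/*
 * For reference only: a minimal C sketch of the value this routine computes,
 * i.e. a partial ones' complement sum of buff folded back into 32 bits. The
 * helper name, the 64-bit accumulator and the simple halfword-at-a-time loop
 * are illustrative assumptions; the assembly below uses an unrolled,
 * alignment-aware strategy (plus byte rotation for odd addresses) to get the
 * same result faster.
 *
 *	static unsigned int csum_partial_ref(const unsigned char *buff,
 *					     int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len > 1) {
 *			// little-endian halfword, as on this 32-bit x86 target
 *			acc += (unsigned int)buff[0] | ((unsigned int)buff[1] << 8);
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len == 1)			// trailing odd byte
 *			acc += buff[0];
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);	// fold carries back
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);	// into 32 bits
 *		return (unsigned int)acc;
 *	}
 */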
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
/*
 * Experiments with Ethernet and SLIP connections show that buff
 * is aligned on either a 2-byte or 4-byte boundary.  We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
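/*
 * A hedged sketch of the 2-byte-to-4-byte alignment conversion mentioned
 * above: if buff is only 2-byte aligned, fold in one halfword first so the
 * unrolled loop can run on a 4-byte-aligned pointer. The helper name and the
 * pointer/length in-out parameters are illustrative, not kernel API.
 *
 *	static unsigned int csum_align_to_4(const unsigned char **pbuff,
 *					    int *plen, unsigned int sum)
 *	{
 *		const unsigned char *buff = *pbuff;
 *
 *		if (((unsigned long)buff & 2) && *plen >= 2) {
 *			unsigned int w = buff[0] | ((unsigned int)buff[1] << 8);
 *
 *			sum += w;		// add the halfword
 *			if (sum < w)		// end-around carry, like adcl $0
 *				sum++;
 *			*pbuff = buff + 2;	// now 4-byte aligned
 *			*plen -= 2;		// alignment uses up two bytes
 *		}
 *		return sum;
 *	}
 */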
SYM_FUNC_START(csum_partial)
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: unsigned char *buff
	testl $3, %esi		# Check alignment.
	jz 2f			# Jump if alignment is ok.
	testl $1, %esi		# Check alignment.
	jz 10f			# Jump if alignment is on a 2-byte boundary.
	subl $2, %ecx		# Alignment uses up two bytes.
	jae 1f			# Jump if we had at least two bytes.
	addl $2, %ecx		# ecx was < 2.  Deal with it.
	shrl $2, %edx		# This clears CF
SYM_FUNC_END(csum_partial)
#else

/* Version for PentiumII/PPro */

SYM_FUNC_START(csum_partial)
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: const unsigned char *buf
	lea 45f(%ebx,%ebx,2), %ebx

	# Handle 2-byte-aligned regions
	movzbl (%esi),%ebx	# csumming 1 byte, 2-aligned
	addw (%esi), %ax	# csumming 2 bytes, 2-aligned
	addl -128(%esi), %eax
	adcl -124(%esi), %eax
	adcl -120(%esi), %eax
	adcl -116(%esi), %eax
	adcl -112(%esi), %eax
	adcl -108(%esi), %eax
	adcl -104(%esi), %eax
	adcl -100(%esi), %eax
	# Handle the last 1-3 bytes without jumping
	notl %ecx		# 1->2, 2->1, 3->0, higher bits are masked
	movl $0xffffff,%ebx	# by the shll and shrl instructions
	andl -128(%esi),%ebx	# esi is 4-aligned so should be ok
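/*
 * A hedged sketch of the branchless mask built above: with n = len & 3 in
 * {1, 2, 3}, the notl/shll/shrl sequence turns 0xffffff into a mask keeping
 * exactly n low-order bytes of the final word, relying on x86 truncating
 * shift counts to their low 5 bits. The helper name is made up; the masked
 * word is then added into the running sum with an end-around carry.
 *
 *	static unsigned int csum_tail_mask(unsigned int n)
 *	{
 *		unsigned int shift = (~n * 8) & 31;	// notl; shll $3; low 5 bits of %cl
 *
 *		return 0xffffffu >> shift;	// 1 -> 0xff, 2 -> 0xffff, 3 -> 0xffffff
 *	}
 */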
SYM_FUNC_END(csum_partial)

#endif
EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
				  int len)
 */

/*
 * Copy from ds while checksumming, otherwise like csum_partial
 */

#define EXC(y...)			\
	9999: y;			\
	_ASM_EXTABLE_UA(9999b, 6001f)
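/*
 * For reference only: a hedged C sketch of the intended semantics: copy len
 * bytes from src to dst while accumulating the same kind of partial checksum
 * as csum_partial over the copied data. Faults are not modelled here; in the
 * assembly every EXC() access gets an exception-table entry whose fixup
 * (label 6001 further down) zeroes the returned sum instead. The helper name,
 * the byte-wise loop and the nonzero seed are illustrative assumptions.
 *
 *	static unsigned int csum_and_copy_ref(const unsigned char *src,
 *					      unsigned char *dst, int len)
 *	{
 *		unsigned long long acc = ~0U;	// assumed nonzero seed, so a
 *		int i;				// return of 0 can mean "faulted"
 *
 *		for (i = 0; i + 1 < len; i += 2) {
 *			dst[i] = src[i];	// copy while summing
 *			dst[i + 1] = src[i + 1];
 *			acc += (unsigned int)src[i] | ((unsigned int)src[i + 1] << 8);
 *		}
 *		if (len & 1) {			// trailing odd byte
 *			dst[len - 1] = src[len - 1];
 *			acc += src[len - 1];
 *		}
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);	// fold carries back
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);	// into 32 bits
 *		return (unsigned int)acc;
 *	}
 */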
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
SYM_FUNC_START(csum_partial_copy_generic)
	movl ARGBASE+12(%esp),%ecx	# len
	movl ARGBASE+4(%esp),%esi	# src
	movl ARGBASE+8(%esp),%edi	# dst

	testl $2, %edi			# Check alignment.
	jz 2f				# Jump if alignment is ok.
	subl $2, %ecx			# Alignment uses up two bytes.
	jae 1f				# Jump if we had at least two bytes.
	addl $2, %ecx			# ecx was < 2.  Deal with it.
EXC(1:	movw (%esi), %bx	)
EXC(	movw %bx, (%edi)	)
	testl %esi, %esi		# what's wrong with clc?
EXC(1:	movl (%esi), %ebx	)
EXC(	movl 4(%esi), %edx	)
EXC(	movl %ebx, (%edi)	)
EXC(	movl %edx, 4(%edi)	)
EXC(	movl 8(%esi), %ebx	)
EXC(	movl 12(%esi), %edx	)
EXC(	movl %ebx, 8(%edi)	)
EXC(	movl %edx, 12(%edi)	)
EXC(	movl 16(%esi), %ebx	)
EXC(	movl 20(%esi), %edx	)
EXC(	movl %ebx, 16(%edi)	)
EXC(	movl %edx, 20(%edi)	)
EXC(	movl 24(%esi), %ebx	)
EXC(	movl 28(%esi), %edx	)
EXC(	movl %ebx, 24(%edi)	)
EXC(	movl %edx, 28(%edi)	)
2:	movl FP(%esp), %edx
	shrl $2, %edx			# This clears CF
EXC(3:	movl (%esi), %ebx	)
EXC(	movl %ebx, (%edi)	)
EXC(	movw (%esi), %cx	)
EXC(	movw %cx, (%edi)	)
EXC(5:	movb (%esi), %cl	)
EXC(	movb %cl, (%edi)	)

	.section .fixup, "ax"

	popl %ecx			# equivalent to addl $4,%esp
SYM_FUNC_END(csum_partial_copy_generic)
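/*
 * A hedged sketch of the exception-table idea behind EXC()/_ASM_EXTABLE_UA
 * as used above: each guarded access records a (faulting instruction, fixup)
 * address pair, and on a fault the trap handler looks the instruction up and
 * resumes at the fixup instead of oopsing. The struct and function names are
 * illustrative, not the kernel's real extable types, and the linear scan
 * stands in for the sorted, binary-searched real table.
 *
 *	struct extable_entry_ref {
 *		unsigned long insn;	// address of the guarded instruction
 *		unsigned long fixup;	// where to resume after a fault
 *	};
 *
 *	static unsigned long extable_fixup_ref(const struct extable_entry_ref *tbl,
 *					       int n, unsigned long fault_ip)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (tbl[i].insn == fault_ip)
 *				return tbl[i].fixup;
 *		return 0;		// not a guarded access
 *	}
 */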
#else

/* Version for PentiumII/PPro */

#define ROUND1(x) \
	EXC(movl x(%esi), %ebx	) ;	\
	addl %ebx, %eax		;	\
	EXC(movl %ebx, x(%edi)	) ;

#define ROUND(x) \
	EXC(movl x(%esi), %ebx	) ;	\
	adcl %ebx, %eax		;	\
	EXC(movl %ebx, x(%edi)	) ;
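/*
 * A hedged C sketch of what one ROUND() does: load a 32-bit word from the
 * source at offset x, add it into the running sum (ROUND consumes the carry
 * of the previous addition, ROUND1 starts a fresh chain), and store the same
 * word to the destination. The 64-bit accumulator stands in for the adcl
 * carry chain; the helper name is illustrative.
 *
 *	static unsigned long long round_ref(const unsigned char *src,
 *					    unsigned char *dst, int x,
 *					    unsigned long long acc)
 *	{
 *		unsigned int w = (unsigned int)src[x] |
 *				 ((unsigned int)src[x + 1] << 8) |
 *				 ((unsigned int)src[x + 2] << 16) |
 *				 ((unsigned int)src[x + 3] << 24);
 *
 *		dst[x] = src[x];		// copy the word...
 *		dst[x + 1] = src[x + 1];
 *		dst[x + 2] = src[x + 2];
 *		dst[x + 3] = src[x + 3];
 *		return acc + w;			// ...while summing it
 *	}
 */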
SYM_FUNC_START(csum_partial_copy_generic)
	movl ARGBASE+4(%esp),%esi	# src
	movl ARGBASE+8(%esp),%edi	# dst
	movl ARGBASE+12(%esp),%ecx	# len
	lea 3f(%ebx,%ebx), %ebx
	EXC(movb -32(%edx),%bl)	; EXC(movb (%edx),%bl)
	ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
	ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
	ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
	ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
4:	movl ARGBASE+12(%esp),%edx	# len
EXC(	movw (%esi), %dx	)
EXC(	movw %dx, (%edi)	)
EXC(	movb (%esi), %dl	)
EXC(	movb %dl, (%edi)	)
	.section .fixup, "ax"
6001:	xorl %eax, %eax

SYM_FUNC_END(csum_partial_copy_generic)
#endif
EXPORT_SYMBOL(csum_partial_copy_generic)