/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 */
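/*
 * For reference, a minimal C sketch of the value computed here: the
 * buffer is summed as 16-bit little-endian words with end-around
 * carry.  This is an illustration only -- it assumes a little-endian
 * CPU, the name csum_partial_ref is made up, and it sidesteps the
 * alignment and rotation tricks the assembly below uses to reach the
 * same result:
 *
 *	static __u32 csum_partial_ref(const unsigned char *p, int len,
 *				      __u32 sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len > 1) {		// one 16-bit word at a time
 *			acc += p[0] | (p[1] << 8);
 *			p += 2;
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte: low lane
 *			acc += p[0];
 *		while (acc >> 32)		// fold carries back in
 *			acc = (__u32)acc + (acc >> 32);
 *		return acc;
 *	}
 */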
buf	.req	r0
len	.req	r1
sum	.req	r2
td0	.req	r3
td1	.req	r4	@ save before use
td2	.req	r5	@ save before use
td3	.req	lr

.Lzero:		mov	r0, sum
		add	sp, sp, #4		@ drop the saved buf pointer
		ldr	pc, [sp], #4		@ return
		/*
		 * Handle 0 to 7 bytes, with any alignment of the
		 * source pointer.  Note that when we get here, C = 0
		 */
.Lless8:	teq	len, #0			@ check for zero count
		beq	.Lzero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ rotate checksum for the odd start
		ldrbne	td0, [buf], #1		@ take the leading odd byte
		subne	len, len, #1
		adcsne	sum, sum, td0, put_byte_1
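		/*
		 * put_byte_N comes from <asm/assembler.h>: it is the shift
		 * that places a byte into byte lane N of a word (on
		 * little-endian ARM, put_byte_0 is "lsl #0" and put_byte_1
		 * is "lsl #8"; big-endian swaps the lanes).
		 */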
.Lless4:	tst	len, #6
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8	@ little-endian: first byte is low
#else
		orr	td0, td3, td0, lsl #8	@ big-endian: first byte is high
#endif
#endif
		adcs	sum, sum, td0
		tst	len, #6
		bne	.Lless8_wordlp
.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrbne	td0, [buf], #1		@ include last byte
		adcsne	sum, sum, td0, put_byte_0	@ update checksum
.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ recover the original buf pointer
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
		ldr	pc, [sp], #4		@ return
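		/*
		 * The ror #8 pairing works because end-around-carry
		 * addition commutes with rotation: with carries folded,
		 * ror(a, 8) + ror(b, 8) == ror(a + b, 8).  For example,
		 * 0x80000001 + 0x80000002 folds to 0x00000004, while
		 * 0x01800000 + 0x02800000 (both inputs ror #8) sum to
		 * 0x04000000, which is 0x00000004 ror #8.  So an odd
		 * start address costs only a rotate in and a rotate out.
		 */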
.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrbne	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcsne	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrhne	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrbne	td0, [buf], #1
		ldrbne	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcsne	sum, sum, td0		@ update checksum
		ret	lr
ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}		@ save buf and return address
		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to process.

		tst	buf, #1
		movne	sum, sum, ror #8	@ pre-rotate for an odd start address

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test source alignment
		blne	.Lnot_aligned		@ align source, return here
1:		bics	ip, len, #31		@ whole 32-byte chunks remaining
		beq	3f

		stmfd	sp!, {r4 - r5}		@ td1/td2 must be saved before use
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0
		bne	2b
		ldmfd	sp!, {r4 - r5}
3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4
4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4
ENDPROC(csum_partial)
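/*
 * A hypothetical call site, for illustration (csum_fold is the kernel
 * helper that compresses the 32-bit partial sum into the final 16-bit
 * ones'-complement checksum; the variable names are made up):
 *
 *	__u32 partial = csum_partial(buf, len, 0);
 *	__u16 check   = csum_fold(partial);
 */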