/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2001 Thiemo Seufer.
 * Copyright (C) 2002 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else

#include <linux/in6.h>

#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
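
/*
 * Typical use is to build the sum up incrementally over several fragments
 * and fold it to 16 bits at the end; a minimal sketch (the fragment
 * pointers and lengths below are hypothetical):
 *
 *	__wsum csum = 0;
 *	__sum16 folded;
 *
 *	csum = csum_partial(frag0, frag0_len, csum);
 *	csum = csum_partial(frag1, frag1_len, csum);
 *	folded = csum_fold(csum);
 */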

__wsum __csum_partial_copy_kernel(const void *src, void *dst,
				  int len, __wsum sum, int *err_ptr);

__wsum __csum_partial_copy_from_user(const void *src, void *dst,
				     int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_to_user(const void *src, void *dst,
				   int len, __wsum sum, int *err_ptr);

/*
 * this is a new version of the above that records errors it finds in *errp,
 * but continues and zeros the rest of the buffer.
 */
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
				   __wsum sum, int *err_ptr)
{
	might_fault();
	if (segment_eq(get_fs(), get_ds()))
		return __csum_partial_copy_kernel((__force void *)src, dst,
						  len, sum, err_ptr);
	else
		return __csum_partial_copy_from_user((__force void *)src, dst,
						     len, sum, err_ptr);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len, __wsum sum, int *err_ptr)
{
	if (access_ok(VERIFY_READ, src, len))
		return csum_partial_copy_from_user(src, dst, len, sum,
						   err_ptr);
	if (len)
		*err_ptr = -EFAULT;

	return sum;
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
			     __wsum sum, int *err_ptr)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, dst, len)) {
		if (segment_eq(get_fs(), get_ds()))
			return __csum_partial_copy_kernel(src,
							  (__force void *)dst,
							  len, sum, err_ptr);
		else
			return __csum_partial_copy_to_user(src,
							   (__force void *)dst,
							   len, sum, err_ptr);
	}
	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}

/*
 * the same as csum_partial, but copies from user space (but on MIPS
 * we have just one address space, so this is identical to the above)
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum);
#define csum_partial_copy_nocheck csum_partial_copy_nocheck

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum += (sum << 16);
	csum = (__force __wsum)(sum < (__force u32)csum);
	sum >>= 16;
	sum += (__force u32)csum;

	return (__force __sum16)~sum;
}
#define csum_fold csum_fold
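
/*
 * For example, folding the 32-bit partial sum 0x0001f203 proceeds as
 * 0x0001 + 0xf203 = 0xf204, and the ones' complement of that is the
 * final 16-bit checksum:
 *
 *	csum_fold((__force __wsum)0x0001f203) == (__force __sum16)0x0dfb
 */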

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;
	unsigned int csum;
	int carry;

	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
#define ip_fast_csum ip_fast_csum
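
/*
 * On receive, the checksum field of the IPv4 header is included in the sum,
 * so a valid header sums to all ones and ip_fast_csum() returns 0.  A sketch
 * of the usual validity check ("iph" being a hypothetical struct iphdr *):
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */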

static inline __wsum csum_tcpudp_nofold(__be32 saddr,
	__be32 daddr, unsigned short len, unsigned short proto,
	__wsum sum)
{
	__asm__(
	"	.set	push		# csum_tcpudp_nofold	\n"
	"	.set	noat					\n"
#ifdef CONFIG_32BIT
	"	addu	%0, %2					\n"
	"	sltu	$1, %0, %2				\n"
	"	addu	%0, $1					\n"

	"	addu	%0, %3					\n"
	"	sltu	$1, %0, %3				\n"
	"	addu	%0, $1					\n"

	"	addu	%0, %4					\n"
	"	sltu	$1, %0, %4				\n"
	"	addu	%0, $1					\n"
#endif
#ifdef CONFIG_64BIT
	"	daddu	%0, %2					\n"
	"	daddu	%0, %3					\n"
	"	daddu	%0, %4					\n"
	"	dsll32	$1, %0, 0				\n"
	"	daddu	%0, $1					\n"
	"	dsra32	%0, %0, 0				\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
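
/*
 * A sketch of how this pseudo-header primitive is combined with
 * csum_partial() and csum_fold() to produce a complete TCP checksum;
 * csum_tcpudp_magic() from <asm-generic/checksum.h> performs the same
 * fold-of-nofold combination ("th" and "tcp_len" are hypothetical):
 *
 *	__wsum csum = csum_partial(th, tcp_len, 0);
 *
 *	th->check = csum_fold(csum_tcpudp_nofold(saddr, daddr, tcp_len,
 *						 IPPROTO_TCP, csum));
 */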

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
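
/*
 * For example, an ICMP message is checksummed by zeroing the checksum
 * field, summing the whole message and storing the folded result
 * ("icmph" and "icmp_len" are hypothetical):
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, icmp_len);
 */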

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__wsum tmp;

	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5				\n"
	"	addu	%0, $1					\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6				\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 4(%2)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 8(%2)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 12(%2)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 0(%3)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 4(%3)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 8(%3)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	lw	%1, 12(%3)				\n"
	"	addu	%0, $1					\n"
	"	addu	%0, %1					\n"
	"	sltu	$1, %0, %1				\n"

	"	addu	%0, $1			# Add final carry\n"
	"	.set	pop"
	: "=&r" (sum), "=&r" (tmp)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}
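
/*
 * Sketch of the usual IPv6 transport checksum construction ("ip6h", "th"
 * and "tcp_len" are hypothetical): checksum the TCP segment with
 * csum_partial(), then let csum_ipv6_magic() add the pseudo header and
 * fold the result.
 *
 *	__wsum csum = csum_partial(th, tcp_len, 0);
 *
 *	th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, tcp_len,
 *				    IPPROTO_TCP, csum);
 */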

#include <asm-generic/checksum.h>
#endif /* CONFIG_GENERIC_CSUM */

#endif /* _ASM_CHECKSUM_H */