#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
					int len, __wsum sum,
					int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr);

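/*
 * Illustrative call pattern (a sketch, not a real kernel code path):
 *
 *	int err = 0;
 *
 *	sum = csum_and_copy_from_user(usrc, kdst, len, sum, &err);
 *	if (err)
 *		return -EFAULT;
 *
 * On a source fault the rest of kdst is zeroed and *err_ptr is set
 * to -EFAULT, so callers must check err before trusting the sum.
 */
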
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/* if there is a carry from adding the two 16-bit halves,
	   it will carry from the lower half into the upper half,
	   giving us the correct sum in the upper half. */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}

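/*
 * For reference, a portable C sketch of the same fold (csum_fold_ref
 * is a hypothetical name, not part of this header):
 *
 *	static inline __sum16 csum_fold_ref(__wsum sum)
 *	{
 *		u32 s = (__force u32)sum;
 *		u32 swapped = (s << 16) | (s >> 16);
 *
 *		return (__force __sum16)(~(s + swapped) >> 16);
 *	}
 *
 * Example: sum = 0x12345678 folds as 0x1234 + 0x5678 = 0x68ac, and
 * the complemented result is 0x9753.
 */
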
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

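/*
 * Two folds suffice: the first leaves at most a 33-bit value
 * (0x1fffffffe), so the second cannot carry out of 32 bits again.
 * Example: x = 0xffffffff00000002 folds to 0x00000002 + 0xffffffff
 * = 0x100000001, then to 0x00000001 + 0x1 = 0x2.
 */
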
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
	return sum;
#endif
}

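/*
 * On 64-bit the pseudo-header words are accumulated in a 64-bit
 * register and folded once at the end; on little-endian, (proto +
 * len) is shifted left by 8 so it lands in the same byte lanes it
 * would occupy in the big-endian byte stream the checksum is defined
 * over. The 32-bit variant instead chains addc/adde/addze so every
 * carry is folded back in (one's-complement addition).
 */
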
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

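/*
 * Typical transmit-side use (an illustrative sketch; iph and uh
 * would point at struct iphdr and struct udphdr):
 *
 *	uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr, udp_len,
 *				      IPPROTO_UDP,
 *				      csum_partial(uh, udp_len, 0));
 *
 * with uh->check zeroed before the csum_partial() pass.
 */
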
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum) from64to32(res);
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}

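/*
 * Example (sketch): validating a received IPv4 header, where iph
 * points at a struct iphdr:
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 *
 * A header whose checksum field is correct sums to all-ones, so
 * csum_fold() of it is zero.
 */
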
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}

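/*
 * For small constant lengths the if-chain above folds away at
 * compile time: e.g. csum_partial(buff, 6, sum) reduces to exactly
 * two csum_add() calls (a u32 at buff and a u16 at buff + 4), with
 * no loop and no call into __csum_partial().
 */
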
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

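/*
 * Example (sketch): checksumming an ICMP message before transmit,
 * with icmph pointing at a struct icmphdr and its checksum field
 * zeroed first:
 *
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */
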
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */