arch/powerpc/include/asm/checksum.h
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
                                        int len, __wsum sum,
                                        int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
                                      int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
                                    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum)   \
        csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp;

        /* swap the two 16-bit halves of sum */
        __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
        /* if there is a carry from adding the two 16-bit halves,
           it will carry from the lower half into the upper half,
           giving us the correct sum in the upper half. */
        return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
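/*
 * Worked example: for sum = 0x12345678 the rlwinm rotate gives
 * tmp = 0x56781234; 0x12345678 + 0x56781234 = 0x68ac68ac, and
 * ~0x68ac68ac >> 16 = 0x9753, the complemented fold of
 * 0x1234 + 0x5678.  A carry out of the low halfword propagates
 * into the high halfword, which is the only part kept by the
 * shift, so the one's complement end-around carry comes for free.
 */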
static inline u32 from64to32(u64 x)
{
        /* add up 32-bit and 32-bit for 32+c bit */
        x = (x & 0xffffffff) + (x >> 32);
        /* add up carry.. */
        x = (x & 0xffffffff) + (x >> 32);
        return (u32)x;
}
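/*
 * Worked example: for x = 0x00000002fffffffe the first step gives
 * 0xfffffffe + 0x2 = 0x100000000, and the second folds the carry
 * back in: 0x0 + 0x1 = 0x1.  After one fold the value is at most
 * 0x1fffffffe, so the second fold can never carry again.
 */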
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
                                        __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
        u64 s = (__force u32)sum;

        s += (__force u32)saddr;
        s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
        s += proto + len;
#else
        s += (proto + len) << 8;
#endif
        return (__force __wsum) from64to32(s);
#else
        __asm__("\n\
        addc %0,%0,%1 \n\
        adde %0,%0,%2 \n\
        adde %0,%0,%3 \n\
        addze %0,%0 \n\
        "
        : "=r" (sum)
        : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
        return sum;
#endif
}
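/*
 * Note on the 32-bit path above: addc starts the carrying add with
 * sum + daddr, the two adde instructions accumulate saddr and
 * proto + len together with the carry bit, and addze folds the
 * final carry back in, matching the end-around-carry behaviour of
 * the 64-bit from64to32() path.
 */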
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                                        __u8 proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
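/*
 * Usage sketch (hypothetical variables): a complete UDP checksum
 * combines the payload sum with the pseudo-header, e.g.
 *
 *      __wsum body = csum_partial(udph, udp_len, 0);
 *      __sum16 check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *                                        IPPROTO_UDP, body);
 */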
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
        u64 res = (__force u64)csum;
#endif
        if (__builtin_constant_p(csum) && csum == 0)
                return addend;
        if (__builtin_constant_p(addend) && addend == 0)
                return csum;

#ifdef __powerpc64__
        res += (__force u64)addend;
        return (__force __wsum) from64to32(res);
#else
        asm("addc %0,%0,%1;"
            "addze %0,%0;"
            : "+r" (csum) : "r" (addend) : "xer");
        return csum;
#endif
}
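/*
 * Worked example: csum_add(0xffffffff, 0x1) on 32-bit executes addc,
 * which yields 0 and sets the carry, then addze adds the carry back,
 * giving 0x1, the one's complement end-around result.  The
 * __builtin_constant_p() shortcuts let the compiler elide the add
 * entirely when either operand is a compile-time zero.
 */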
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
        const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
        unsigned int i;
        u64 s = *(const u32 *)iph;

        for (i = 0; i < ihl - 1; i++, ptr++)
                s += *ptr;
        return (__force __wsum)from64to32(s);
#else
        __wsum sum, tmp;

        asm("mtctr %3;"
            "addc %0,%4,%5;"
            "1: lwzu %1, 4(%2);"
            "adde %0,%0,%1;"
            "bdnz 1b;"
            "addze %0,%0;"
            : "=r" (sum), "=r" (tmp), "+b" (ptr)
            : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
            : "ctr", "xer", "memory");

        return sum;
#endif
}
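/*
 * Note on the 32-bit path above: addc sums header words 0 and 1,
 * mtctr preloads the count register with the remaining ihl - 2
 * words, and each lwzu/adde iteration pre-increments the pointer,
 * loads the next word and adds it with carry; the final addze
 * folds the last carry back in, so all ihl words are accumulated
 * with carry propagation.
 */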
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
        if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
                if (len == 2)
                        sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
                if (len >= 4)
                        sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
                if (len == 6)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 4));
                if (len >= 8)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 4));
                if (len == 10)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 8));
                if (len >= 12)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 8));
                if (len == 14)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 12));
                if (len >= 16)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 12));
        } else if (__builtin_constant_p(len) && (len & 3) == 0) {
                sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
        } else {
                sum = __csum_partial(buff, len, sum);
        }
        return sum;
}
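/*
 * Worked example: with a compile-time-constant length the branches
 * above collapse, e.g. csum_partial(buff, 8, 0) reduces to the two
 * csum_add() calls for the words at buff and buff + 4; only a
 * run-time length falls through to the out-of-line __csum_partial().
 */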
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
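/*
 * Usage sketch (hypothetical variables): an ICMP sender zeroes the
 * checksum field and then stores the complemented sum of the whole
 * message, e.g.
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, len);
 */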
#endif /* CONFIG_GENERIC_CSUM */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */