Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[wrt350n-kernel.git] / arch / x86 / lib / csum-wrappers_64.c
blob9e06ac47322b0fc81f0d3541ffe81b2c882f2b70
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
19 #include <asm/checksum.h>
20 #include <linux/module.h>
22 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
23 /**
24 * csum_partial_copy_from_user - Copy and checksum from user space.
25 * @src: source address (user space)
26 =======
27 /**
28 * csum_partial_copy_from_user - Copy and checksum from user space.
29 * @src: source address (user space)
30 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
31 * @dst: destination address
32 * @len: number of bytes to be copied.
33 * @isum: initial sum that is added into the result (32bit unfolded)
34 * @errp: set to -EFAULT for an bad source address.
35 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
37 =======
39 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
40 * Returns an 32bit unfolded checksum of the buffer.
41 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
42 * src and dst are best aligned to 64bits.
43 */
44 =======
45 * src and dst are best aligned to 64bits.
47 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
48 __wsum
49 csum_partial_copy_from_user(const void __user *src, void *dst,
50 int len, __wsum isum, int *errp)
51 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
53 =======
55 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
56 might_sleep();
57 *errp = 0;
58 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
59 if (likely(access_ok(VERIFY_READ,src, len))) {
60 /* Why 6, not 7? To handle odd addresses aligned we
61 would need to do considerable complications to fix the
62 checksum which is defined as an 16bit accumulator. The
63 fix alignment code is primarily for performance
64 compatibility with 32bit and that will handle odd
65 addresses slowly too. */
66 if (unlikely((unsigned long)src & 6)) {
67 while (((unsigned long)src & 6) && len >= 2) {
68 __u16 val16;
69 *errp = __get_user(val16, (const __u16 __user *)src);
70 if (*errp)
71 return isum;
72 *(__u16 *)dst = val16;
73 isum = (__force __wsum)add32_with_carry(
74 (__force unsigned)isum, val16);
75 src += 2;
76 dst += 2;
77 len -= 2;
79 =======
81 if (!likely(access_ok(VERIFY_READ, src, len)))
82 goto out_err;
85 * Why 6, not 7? To handle odd addresses aligned we
86 * would need to do considerable complications to fix the
87 * checksum which is defined as an 16bit accumulator. The
88 * fix alignment code is primarily for performance
89 * compatibility with 32bit and that will handle odd
90 * addresses slowly too.
92 if (unlikely((unsigned long)src & 6)) {
93 while (((unsigned long)src & 6) && len >= 2) {
94 __u16 val16;
96 *errp = __get_user(val16, (const __u16 __user *)src);
97 if (*errp)
98 return isum;
100 *(__u16 *)dst = val16;
101 isum = (__force __wsum)add32_with_carry(
102 (__force unsigned)isum, val16);
103 src += 2;
104 dst += 2;
105 len -= 2;
106 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
108 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
109 isum = csum_partial_copy_generic((__force const void *)src,
110 dst, len, isum, errp, NULL);
111 if (likely(*errp == 0))
112 return isum;
114 =======
116 isum = csum_partial_copy_generic((__force const void *)src,
117 dst, len, isum, errp, NULL);
118 if (unlikely(*errp))
119 goto out_err;
121 return isum;
123 out_err:
124 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
125 *errp = -EFAULT;
126 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
127 memset(dst,0,len);
128 return isum;
130 =======
131 memset(dst, 0, len);
132 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
134 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
135 =======
136 return isum;
138 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
139 EXPORT_SYMBOL(csum_partial_copy_from_user);
141 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
142 /**
143 * csum_partial_copy_to_user - Copy and checksum to user space.
144 =======
146 * csum_partial_copy_to_user - Copy and checksum to user space.
147 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
148 * @src: source address
149 * @dst: destination address (user space)
150 * @len: number of bytes to be copied.
151 * @isum: initial sum that is added into the result (32bit unfolded)
152 * @errp: set to -EFAULT for an bad destination address.
153 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
155 =======
157 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
158 * Returns an 32bit unfolded checksum of the buffer.
159 * src and dst are best aligned to 64bits.
160 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
162 =======
164 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
165 __wsum
166 csum_partial_copy_to_user(const void *src, void __user *dst,
167 int len, __wsum isum, int *errp)
168 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
170 =======
172 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
173 might_sleep();
174 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
175 =======
177 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
178 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
179 *errp = -EFAULT;
180 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
181 return 0;
182 =======
183 return 0;
184 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
187 if (unlikely((unsigned long)dst & 6)) {
188 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
189 while (((unsigned long)dst & 6) && len >= 2) {
190 =======
191 while (((unsigned long)dst & 6) && len >= 2) {
192 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
193 __u16 val16 = *(__u16 *)src;
194 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
195 =======
197 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
198 isum = (__force __wsum)add32_with_carry(
199 (__force unsigned)isum, val16);
200 *errp = __put_user(val16, (__u16 __user *)dst);
201 if (*errp)
202 return isum;
203 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
204 src += 2;
205 dst += 2;
206 =======
207 src += 2;
208 dst += 2;
209 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
210 len -= 2;
214 *errp = 0;
215 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
216 return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp);
219 =======
220 return csum_partial_copy_generic(src, (void __force *)dst,
221 len, isum, NULL, errp);
223 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
224 EXPORT_SYMBOL(csum_partial_copy_to_user);
226 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
227 /**
228 =======
230 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
231 * csum_partial_copy_nocheck - Copy and checksum.
232 * @src: source address
233 * @dst: destination address
234 * @len: number of bytes to be copied.
235 * @isum: initial sum that is added into the result (32bit unfolded)
236 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
238 =======
240 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
241 * Returns an 32bit unfolded checksum of the buffer.
242 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
244 =======
246 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
247 __wsum
248 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
249 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
251 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
253 =======
255 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
257 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
258 EXPORT_SYMBOL(csum_partial_copy_nocheck);
260 __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
261 const struct in6_addr *daddr,
262 __u32 len, unsigned short proto, __wsum sum)
264 __u64 rest, sum64;
265 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
267 =======
269 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
270 rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
271 (__force __u64)sum;
272 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
273 asm(" addq (%[saddr]),%[sum]\n"
274 " adcq 8(%[saddr]),%[sum]\n"
275 " adcq (%[daddr]),%[sum]\n"
276 " adcq 8(%[daddr]),%[sum]\n"
277 " adcq $0,%[sum]\n"
278 : [sum] "=r" (sum64)
279 : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
280 return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
282 =======
283 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
285 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
286 =======
287 asm(" addq (%[saddr]),%[sum]\n"
288 " adcq 8(%[saddr]),%[sum]\n"
289 " adcq (%[daddr]),%[sum]\n"
290 " adcq 8(%[daddr]),%[sum]\n"
291 " adcq $0,%[sum]\n"
293 : [sum] "=r" (sum64)
294 : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
296 return csum_fold(
297 (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
299 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
300 EXPORT_SYMBOL(csum_ipv6_magic);