/* Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
19 #include <asm/checksum.h>
20 #include <linux/module.h>
22 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
24 * csum_partial_copy_from_user - Copy and checksum from user space.
25 * @src: source address (user space)
28 * csum_partial_copy_from_user - Copy and checksum from user space.
29 * @src: source address (user space)
30 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
31 * @dst: destination address
32 * @len: number of bytes to be copied.
33 * @isum: initial sum that is added into the result (32bit unfolded)
34 * @errp: set to -EFAULT for an bad source address.
35 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
39 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
40 * Returns an 32bit unfolded checksum of the buffer.
41 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
42 * src and dst are best aligned to 64bits.
45 * src
and dst are best aligned to
64bits
.
47 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
49 csum_partial_copy_from_user(const void __user
*src
, void *dst
,
50 int len
, __wsum isum
, int *errp
)
51 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
55 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
58 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
59 if (likely(access_ok(VERIFY_READ
,src
, len
))) {
60 /* Why 6, not 7? To handle odd addresses aligned we
61 would need to do considerable complications to fix the
62 checksum which is defined as an 16bit accumulator. The
63 fix alignment code is primarily for performance
64 compatibility with 32bit and that will handle odd
65 addresses slowly too. */
66 if (unlikely((unsigned long)src
& 6)) {
67 while (((unsigned long)src
& 6) && len
>= 2) {
69 *errp
= __get_user(val16
, (const __u16 __user
*)src
);
72 *(__u16
*)dst
= val16
;
73 isum
= (__force __wsum
)add32_with_carry(
74 (__force
unsigned)isum
, val16
);
81 if (!likely(access_ok(VERIFY_READ
, src
, len
)))
85 * Why 6, not 7? To handle odd addresses aligned we
86 * would need to do considerable complications to fix the
87 * checksum which is defined as an 16bit accumulator. The
88 * fix alignment code is primarily for performance
89 * compatibility with 32bit and that will handle odd
90 * addresses slowly too.
92 if (unlikely((unsigned long)src
& 6)) {
93 while (((unsigned long)src
& 6) && len
>= 2) {
96 *errp
= __get_user(val16
, (const __u16 __user
*)src
);
100 *(__u16
*)dst
= val16
;
101 isum
= (__force __wsum
)add32_with_carry(
102 (__force
unsigned)isum
, val16
);
106 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
108 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
109 isum
= csum_partial_copy_generic((__force
const void *)src
,
110 dst
, len
, isum
, errp
, NULL
);
111 if (likely(*errp
== 0))
116 isum
= csum_partial_copy_generic((__force
const void *)src
,
117 dst
, len
, isum
, errp
, NULL
);
124 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
126 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
132 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
134 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
138 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
139 EXPORT_SYMBOL(csum_partial_copy_from_user
);
141 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
143 * csum_partial_copy_to_user - Copy and checksum to user space.
146 * csum_partial_copy_to_user - Copy and checksum to user space.
147 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
148 * @src: source address
149 * @dst: destination address (user space)
150 * @len: number of bytes to be copied.
151 * @isum: initial sum that is added into the result (32bit unfolded)
152 * @errp: set to -EFAULT for an bad destination address.
153 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
157 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
158 * Returns an 32bit unfolded checksum of the buffer.
159 * src and dst are best aligned to 64bits.
160 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
164 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
166 csum_partial_copy_to_user(const void *src
, void __user
*dst
,
167 int len
, __wsum isum
, int *errp
)
168 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
172 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
174 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
177 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
178 if (unlikely(!access_ok(VERIFY_WRITE
, dst
, len
))) {
180 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
184 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
187 if (unlikely((unsigned long)dst
& 6)) {
188 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
189 while (((unsigned long)dst
& 6) && len
>= 2) {
191 while (((unsigned long)dst
& 6) && len
>= 2) {
192 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
193 __u16 val16
= *(__u16
*)src
;
194 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
197 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
198 isum
= (__force __wsum
)add32_with_carry(
199 (__force
unsigned)isum
, val16
);
200 *errp
= __put_user(val16
, (__u16 __user
*)dst
);
203 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
209 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
215 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
216 return csum_partial_copy_generic(src
, (void __force
*)dst
,len
,isum
,NULL
,errp
);
220 return csum_partial_copy_generic(src
, (void __force
*)dst
,
221 len
, isum
, NULL
, errp
);
223 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
224 EXPORT_SYMBOL(csum_partial_copy_to_user
);
226 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
230 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
231 * csum_partial_copy_nocheck - Copy and checksum.
232 * @src: source address
233 * @dst: destination address
234 * @len: number of bytes to be copied.
235 * @isum: initial sum that is added into the result (32bit unfolded)
236 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
240 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:arch/x86/lib/csum-wrappers_64.c
241 * Returns an 32bit unfolded checksum of the buffer.
242 <<<<<<< HEAD:arch/x86/lib/csum-wrappers_64.c
246 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
248 csum_partial_copy_nocheck(const void *src
, void *dst
, int len
, __wsum sum
)
249 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
251 return csum_partial_copy_generic(src
,dst
,len
,sum
,NULL
,NULL
);
255 return csum_partial_copy_generic(src
, dst
, len
, sum
, NULL
, NULL
);
257 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
258 EXPORT_SYMBOL(csum_partial_copy_nocheck
);
260 __sum16
csum_ipv6_magic(const struct in6_addr
*saddr
,
261 const struct in6_addr
*daddr
,
262 __u32 len
, unsigned short proto
, __wsum sum
)
265 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
269 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
270 rest
= (__force __u64
)htonl(len
) + (__force __u64
)htons(proto
) +
272 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
273 asm(" addq (%[saddr]),%[sum]\n"
274 " adcq 8(%[saddr]),%[sum]\n"
275 " adcq (%[daddr]),%[sum]\n"
276 " adcq 8(%[daddr]),%[sum]\n"
279 : "[sum]" (rest
),[saddr
] "r" (saddr
), [daddr
] "r" (daddr
));
280 return csum_fold((__force __wsum
)add32_with_carry(sum64
& 0xffffffff, sum64
>>32));
283 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
285 <<<<<<< HEAD
:arch
/x86
/lib
/csum
-wrappers_64
.c
287 asm(" addq (%[saddr]),%[sum]\n"
288 " adcq 8(%[saddr]),%[sum]\n"
289 " adcq (%[daddr]),%[sum]\n"
290 " adcq 8(%[daddr]),%[sum]\n"
294 : "[sum]" (rest
), [saddr
] "r" (saddr
), [daddr
] "r" (daddr
));
297 (__force __wsum
)add32_with_carry(sum64
& 0xffffffff, sum64
>>32));
299 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a
:arch
/x86
/lib
/csum
-wrappers_64
.c
300 EXPORT_SYMBOL(csum_ipv6_magic
);