/*
 * Source provenance (gitweb extraction artifacts, kept for reference):
 * commit subject: "Avoid beyond bounds copy while caching ACL"
 * [zen-stable.git] / arch / avr32 / include / asm / checksum.h
 * blob 4ddbfd2486af06412b32e7cf66af54ce1942d9e8
 */
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_CHECKSUM_H
#define __ASM_AVR32_CHECKSUM_H
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
/* Implemented in assembly elsewhere in the tree; only declared here. */
__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
/*
 * src_err_ptr / dst_err_ptr receive fault reports for user-space
 * pointers; pass NULL when the corresponding pointer is a kernel
 * address (see the wrappers below). Implemented in assembly.
 */
__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
				 __wsum sum, int *src_err_ptr,
				 int *dst_err_ptr);
37 * Note: when you get a NULL pointer exception here this means someone
38 * passed in an incorrect kernel address to one of these functions.
40 * If you use these functions directly please don't forget the
41 * access_ok().
43 static inline
44 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
45 int len, __wsum sum)
47 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
50 static inline
51 __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
52 int len, __wsum sum, int *err_ptr)
54 return csum_partial_copy_generic((const void __force *)src, dst, len,
55 sum, err_ptr, NULL);
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
/*
 * NOTE(review): the loop structure (sub %2, 4 up front, then a
 * decrement-and-branch loop) assumes ihl >= 5, i.e. at least the
 * minimum legal IP header length in 32-bit words — ihl == 4 would
 * make the loop counter underflow. Callers pass iph->ihl, which the
 * stack validates, so this precondition presumably always holds.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, tmp;

	__asm__ __volatile__(
		/* Sum the first four 32-bit words, accumulating carries. */
		"	ld.w	%0, %1++\n"
		"	ld.w	%3, %1++\n"
		"	sub	%2, 4\n"
		"	add	%0, %3\n"
		"	ld.w	%3, %1++\n"
		"	adc	%0, %0, %3\n"
		"	ld.w	%3, %1++\n"
		"	adc	%0, %0, %3\n"
		"	acr	%0\n"
		/* Add the remaining (ihl - 4) words, one per iteration. */
		"1:	ld.w	%3, %1++\n"
		"	add	%0, %3\n"
		"	acr	%0\n"
		"	sub	%2, 1\n"
		"	brne	1b\n"
		/* Fold the 32-bit sum to 16 bits and complement. */
		"	lsl	%3, %0, 16\n"
		"	andl	%0, 0\n"
		"	mov	%2, 0xffff\n"
		"	add	%0, %3\n"
		"	adc	%0, %0, %2\n"
		"	com	%0\n"
		"	lsr	%0, 16\n"
		/* iph and ihl are consumed (read-write via "1"/"2" ties). */
		: "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
		: "1"(iph), "2"(ihl)
		: "memory", "cc");
	return (__force __sum16)sum;
}
/*
 * Fold a partial checksum
 */
/*
 * Reduces a 32-bit one's-complement accumulator to 16 bits:
 * low half + high half, then add back any carry out of bit 15,
 * finally complement and truncate to 16 bits.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	asm("	bfextu	%1, %0, 0, 16\n"	/* tmp = low 16 bits */
	    "	lsr	%0, 16\n"		/* sum = high 16 bits */
	    "	add	%0, %1\n"		/* sum += tmp (may carry into bit 16) */
	    "	bfextu	%1, %0, 16, 16\n"	/* tmp = carry half */
	    "	add	%0, %1"			/* fold the carry back in */
	    : "=&r"(sum), "=&r"(tmp)
	    : "0"(sum));

	return (__force __sum16)~sum;
}
/*
 * Accumulate the TCP/UDP pseudo-header (source address, destination
 * address, protocol and length) into sum, without the final fold.
 * len + proto are added as one 32-bit value, which is correct for
 * this big-endian architecture.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	asm("	add	%0, %1\n"	/* sum += daddr */
	    "	adc	%0, %0, %2\n"	/* sum += saddr + carry */
	    "	adc	%0, %0, %3\n"	/* sum += (len + proto) + carry */
	    "	acr	%0"		/* add final carry */
	    : "=r"(sum)
	    : "r"(daddr), "r"(saddr), "r"(len + proto),
	      "0"(sum)
	    : "cc");

	return sum;
}
131 * computes the checksum of the TCP/UDP pseudo-header
132 * returns a 16-bit checksum, already complemented
134 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
135 unsigned short len,
136 unsigned short proto,
137 __wsum sum)
139 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
143 * this routine is used for miscellaneous IP-like checksums, mainly
144 * in icmp.c
147 static inline __sum16 ip_compute_csum(const void *buff, int len)
149 return csum_fold(csum_partial(buff, len, 0));
152 #endif /* __ASM_AVR32_CHECKSUM_H */