/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len);

#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len)	\
	csum_partial_copy_generic((src), (dst), (len))

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/* if there is a carry from adding the two 16-bit halves,
	   it will carry from the lower half into the upper half,
	   giving us the correct sum in the upper half. */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
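
/*
 * Worked example (illustrative, not part of the original header):
 * for sum = 0x12345678, the rlwinm rotate gives tmp = 0x56781234;
 * 0x12345678 + 0x56781234 = 0x68ac68ac, and ~0x68ac68ac >> 16 = 0x9753,
 * which matches folding by hand: 0x1234 + 0x5678 = 0x68ac, ~0x68ac = 0x9753.
 */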

static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}
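
/*
 * Illustrative note (not from the original source): adding x to its
 * 32-bit rotation propagates any carry out of the low word into the
 * high word, so the top 32 bits hold the ones' complement fold of the
 * 64-bit sum.  E.g. x = 0x00000001ffffffff gives
 * 0x00000001ffffffff + 0xffffffff00000001 = 0x0000000100000000 (mod 2^64),
 * whose high word 0x00000001 equals 0x00000001 + 0xffffffff with
 * end-around carry.
 */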

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
	return sum;
#endif
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
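
/*
 * Illustrative usage sketch (not part of the original header): a UDP
 * transmit path would typically sum the datagram with csum_partial()
 * and then fold in the pseudo-header, e.g.
 *
 *	__wsum csum = csum_partial(udph, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 *
 * where udph, ulen, saddr, daddr and uh are hypothetical local variables.
 */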

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
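
/*
 * Worked example (illustrative, not from the original source): adding
 * 0xffffffff and 0x00000002 gives res = 0x100000001; the
 * (u32)res + (res >> 32) step folds the carry back in, yielding
 * 0x00000002, the same result as the 32-bit addc/addze sequence.
 */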

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	unsigned int sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return (__force __wsum)sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
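
/*
 * Illustrative usage note (not part of the original header): a receive
 * path would typically validate an IPv4 header with something like
 *
 *	if (ip_fast_csum((const void *)iph, iph->ihl))
 *		goto csum_error;
 *
 * where iph and csum_error are hypothetical; a correct header sums to
 * 0xffff before complementing, so the folded, complemented result is 0
 * for a valid header.
 */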

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
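
/*
 * Illustrative note (not from the original source): for a
 * compile-time-constant even length up to 16, e.g. len == 6, the chain
 * of if-tests reduces csum_partial() to two csum_add() calls, summing
 * the 32-bit word at buff and the 16-bit word at buff + 4, with no call
 * to __csum_partial().
 */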

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif /* __KERNEL__ */
#endif