/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len,
					__wsum sum, int *src_err, int *dst_err);
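
/*
 * Sketch of the error-checking convention described above; the
 * surrounding caller and variable names are hypothetical:
 *
 *	int src_err = 0, dst_err = 0;
 *
 *	sum = csum_partial_copy_generic(src, dst, len, sum,
 *					&src_err, &dst_err);
 *	if (src_err || dst_err)
 *		return -EFAULT;
 */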

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/* if there is a carry from adding the two 16-bit halves,
	   it will carry from the lower half into the upper half,
	   giving us the correct sum in the upper half. */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
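
/*
 * Worked example with an arbitrary value, sum = 0x8001ffff:
 *   tmp = 0xffff8001             (halves swapped)
 *   sum + tmp = 0x80018000       (mod 2^32; the high half is
 *                                 0x8001 + 0xffff folded with its
 *                                 end-around carry)
 *   ~(sum + tmp) >> 16 = 0x7ffe  (the complemented 16-bit result)
 */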

static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}
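
/*
 * ror64(x, 32) swaps the 32-bit halves of x, so the addition places
 * high + low (plus the end-around carry out of the low word) in the
 * upper 32 bits, which the shift then extracts.  E.g. for
 * x = 0x00000001_00000003 the result is 4.
 */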

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
	return sum;
#endif
}
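
/*
 * Note on the little-endian branch above: the 16-bit ones' complement
 * sum domain is byte-order sensitive, and rotating a term by 8 bits
 * is equivalent to byte-swapping it under ones' complement addition,
 * so (proto + len) << 8 feeds the pseudo-header constants in the same
 * byte-swapped form as the summed packet data.
 */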

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
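
/*
 * Sketch of a typical caller (variable names are hypothetical),
 * assuming the datagram sum was accumulated with csum_partial():
 *
 *	__wsum csum = csum_partial(udph, udp_len, 0);
 *
 *	udph->check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *					IPPROTO_UDP, csum);
 */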

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
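
/*
 * Example of the 64-bit fold above: csum_add(0xffffffff, 0x00000001)
 * gives res = 0x1_00000000, and (u32)res + (res >> 32) = 0 + 1 = 1,
 * i.e. the end-around carry is re-added as ones' complement
 * arithmetic requires.
 */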

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}
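
/*
 * For a standard 20-byte IPv4 header (ihl = 5, no options) this sums
 * five 32-bit words: the first from the initial load of *iph, the
 * remaining four through ptr, before the final carry fold.
 */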

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
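
/*
 * Because the tests above use __builtin_constant_p(), a call such as
 * csum_partial(buff, 8, sum) with a compile-time-constant length
 * reduces to two inline csum_add() steps (the 32-bit words at
 * offsets 0 and 4); larger constant multiples of 4 use
 * ip_fast_csum_nofold(), and everything else falls through to the
 * out-of-line __csum_partial().
 */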

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif /* __KERNEL__ */
#endif