/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
#define CSUM_TAB_MASK 15

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
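/*
 * Illustrative only (approximate iproute2 syntax, not part of this file):
 * the update targets chosen on the command line, e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip protocol 17 0xff \
 *		action csum ip4h and udp
 *
 * arrive here as TCA_CSUM_UPDATE_FLAG_* bits in tc_csum.update_flags.
 */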
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
			 struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	if (!tcf_hash_check(parm->index, a, bind)) {
		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(a);
	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(a);

	return ret;
}
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    (skb_cloned(skb) &&
	     !skb_clone_writable(skb, hl + ntkoff) &&
	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
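/*
 * Each per-protocol helper below follows the same pattern: locate the
 * transport header via tcf_csum_skb_nextlayer() (which also guarantees the
 * data is writable), zero the checksum field, recompute the checksum over
 * the transport payload (plus a pseudo-header where the protocol requires
 * one), and clear skb->ip_summed so stale checksum offload state is not
 * trusted afterwards.
 */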
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
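/*
 * Note on the CSUM_MANGLED_0 substitution above: for UDP over IPv4 a
 * transmitted checksum of zero means "no checksum computed", so a sum that
 * folds to zero must be sent as 0xffff instead.
 */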
static int tcf_csum_ipv6_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
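/*
 * Unlike IPv4, UDP over IPv6 has no "no checksum" escape hatch (RFC 2460),
 * which is why the IPv6 variant above recomputes the checksum
 * unconditionally rather than testing udph->check first.
 */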
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);
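	/*
	 * Non-first fragments carry no transport header, so treat them as
	 * protocol 0 below and skip every layer-4 case; only the IPv4
	 * header checksum may still be updated further down.
	 */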
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
				 unsigned int ixhl, unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
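/*
 * The jumbo TLV handling above matters because an IPv6 jumbogram carries
 * payload_len == 0 in the fixed header; the real length lives in the
 * Hop-by-Hop jumbo option, so the caller's length (*pl) is rewritten from
 * it before any transport checksum is computed.
 */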
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
static int tcf_csum(struct sk_buff *skb,
		    const struct tc_action *a, struct tcf_result *res)
{
	struct tcf_csum *p = a->priv;
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
};
MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, CSUM_TAB_MASK);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);