/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

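/* Parse the TCA_CSUM netlink attributes and create or update a csum
 * action instance, recording the requested update_flags.
 */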
static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
                         int bind)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        if (!tcf_hash_check(tn, parm->index, a, bind)) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      &act_csum_ops, bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)       /* don't override defaults */
                        return 0;
                tcf_hash_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(*a);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, *a);

        return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            skb_try_make_writable(skb, hl + ntkoff))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

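/* Recompute the ICMP checksum over the whole ICMP message. */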
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

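/* Recompute the TCP checksum, including the IPv4 pseudo-header. */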
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

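/* Recompute the UDP or UDP-Lite checksum over IPv4. For plain UDP a zero
 * checksum means "no checksum" and is left untouched.
 */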
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use iph->tot_len, or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {

                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

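/* IPv6 variant of the UDP/UDPLITE checksum update; the checksum is
 * mandatory for UDP over IPv6, so it is always recomputed.
 */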
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);
                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);
                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

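/* Recompute the SCTP CRC32c checksum, except for GSO SCTP skbs. */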
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
                         unsigned int ipl)
{
        struct sctphdr *sctph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
                return 1;

        sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
        if (!sctph)
                return 0;

        sctph->checksum = sctp_compute_cksum(skb,
                                             skb_network_offset(skb) + ihl);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

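/* Dispatch on the IPv4 protocol field and update the checksums selected
 * by update_flags; non-first fragments carry no L4 header and are skipped.
 */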
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        case IPPROTO_SCTP:
                if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
                        goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}

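/* Scan a hop-by-hop options header for a jumbogram option and, if one is
 * found, replace the payload length with the jumbo length.
 */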
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
                                 unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

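/* Walk the IPv6 extension header chain and update the checksums selected
 * by update_flags; fragmented packets are left untouched.
 */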
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                case IPPROTO_SCTP:
                        if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                            !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
                                goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}

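/* Action entry point: snapshot the configured action and flags under the
 * lock, then rewrite checksums according to the packet's protocol.
 */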
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
{
        struct tcf_csum *p = to_tcf_csum(a);
        int action;
        u32 update_flags;

        spin_lock(&p->tcf_lock);
        tcf_lastuse_update(&p->tcf_tm);
        bstats_update(&p->tcf_bstats, skb);
        action = p->tcf_action;
        update_flags = p->update_flags;
        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        switch (tc_skb_protocol(skb)) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

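/* Dump the action parameters and timestamps back to user space. */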
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                         int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = to_tcf_csum(a);
        struct tc_csum opt = {
                .update_flags = p->update_flags,
                .index   = p->tcf_index,
                .action  = p->tcf_action,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_hash_search(tn, a, index);
}

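/* Action ops and per-netns registration for the csum action. */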
static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .init           = tcf_csum_init,
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .size           = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tc_action_net_init(tn, &act_csum_ops, CSUM_TAB_MASK);
}

static void __net_exit csum_exit_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
        .init = csum_init_net,
        .exit = csum_exit_net,
        .id   = &csum_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
        return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);