/*
 * net/sched/act_csum.c	Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15
static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
static u32 csum_idx_gen;
static DEFINE_RWLOCK(csum_lock);

static struct tcf_hashinfo csum_hash_info = {
	.htab = tcf_csum_ht,
	.hmask = CSUM_TAB_MASK,
	.lock = &csum_lock,
};

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

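/*
 * Netlink setup path: parse TCA_CSUM_PARMS and either create a new action
 * instance or update an existing one found in the hash table.
 */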
static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
			 struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_common *pc;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &csum_idx_gen, &csum_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		p = to_tcf_csum(pc);
		ret = ACT_P_CREATED;
	} else {
		p = to_tcf_csum(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &csum_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &csum_hash_info);

	return ret;
}

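/* Drop a reference; the action is freed when the last reference goes away. */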
static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
	struct tcf_csum *p = a->priv;
	return tcf_hash_release(&p->common, bind, &csum_hash_info);
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Verify that the expected next-layer header is available in the specified
 * sk_buff and that the buffer is writable, un-cloning it if necessary.
 * Returns the next layer pointer if the checks pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    (skb_cloned(skb) &&
	     !skb_clone_writable(skb, hl + ntkoff) &&
	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

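/*
 * The helpers below all follow the same pattern: locate the transport
 * header, zero its checksum field, recompute the sum over the payload
 * with csum_partial(), and write back the finalized checksum.
 */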
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

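/*
 * Unlike ICMP/IGMP over IPv4, the ICMPv6, TCP and UDP checksums also cover
 * a pseudo-header of addresses, length and protocol, folded in below via
 * csum_ipv6_magic(), tcp_v4_check() and csum_tcpudp_magic().
 */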
static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmp6hdr *icmp6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

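/*
 * For UDP over IPv4 a zero checksum means "no checksum transmitted"
 * (RFC 768), so it is only recomputed if already set; UDPLITE checksums
 * are mandatory. A recomputed value of zero is replaced by CSUM_MANGLED_0
 * to keep it distinguishable from "no checksum".
 */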
static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

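/*
 * IPv4 entry point: dispatch on the L4 protocol (non-first fragments are
 * skipped since they carry no transport header), then optionally rewrite
 * the IPv4 header checksum itself.
 */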
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		ip_send_check(iph);
	}

	return 1;

fail:
	return 0;
}

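/*
 * Scan a hop-by-hop options header for a jumbo payload option (RFC 2675);
 * when present it supplies the real payload length, since
 * ip6h->payload_len is zero for jumbograms.
 */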
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
				 unsigned int ixhl, unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

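/*
 * IPv6 entry point: walk the extension header chain until a recognized
 * transport protocol is found, then fix up its checksum. Fragments and
 * unknown next headers are passed through untouched.
 */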
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb, ip6h,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb, ip6h,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

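/* The action entry point, run for every packet that hits the filter. */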
static int tcf_csum(struct sk_buff *skb,
		    struct tc_action *a, struct tcf_result *res)
{
	struct tcf_csum *p = a->priv;
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index = p->tcf_index,
		.action = p->tcf_action,
		.refcnt = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.hinfo = &csum_hash_info,
	.type = TCA_ACT_CSUM,
	.capab = TCA_CAP_NONE,
	.owner = THIS_MODULE,
	.act = tcf_csum,
	.dump = tcf_csum_dump,
	.cleanup = tcf_csum_cleanup,
	.lookup = tcf_hash_search,
	.init = tcf_csum_init,
	.walk = tcf_generic_walker
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
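
/*
 * Usage sketch (assuming an iproute2 build with csum action support; the
 * exact keyword set may vary between iproute2 versions):
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip protocol 17 0xff \
 *		action csum udp
 *
 * This would recompute the UDP checksum of matching packets, e.g. after a
 * pedit action has rewritten addresses or ports.
 */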