/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};
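/*
 * Per-filter state: the configured key mask and mapping parameters,
 * the hash perturbation timer and seed used in FLOW_MODE_HASH, and an
 * rcu_work so the filter can be torn down after an RCU grace period.
 */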
struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};
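/* Fold a kernel pointer into a 32-bit value; used as a last-resort key
 * when a packet carries no usable address or port information.
 */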
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}
static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}
static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}
static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}
static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
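/*
 * CTTUPLE() is a statement expression evaluating to the requested
 * conntrack tuple member.  When no conntrack entry is attached (or
 * conntrack is compiled out) it jumps to the caller's local fallback:
 * label, so each user below provides one and falls back to the plain
 * flow-dissector based getter.
 */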
static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}
static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}
static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}
static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}
static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}
static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
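/* Keys that require a prior skb_flow_dissect_flow_keys() run; keys
 * derived purely from skb metadata (mark, priority, iif, ...) do not.
 */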
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |			\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
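/*
 * Classify one packet: for each filter, gather the configured keys in
 * keymask order.  In FLOW_MODE_HASH the key vector is jhashed with the
 * (periodically perturbed) hashrnd seed; in FLOW_MODE_MAP the single
 * key is transformed as ((key & mask) ^ xor) >> rshift, plus addend.
 * The result is reduced modulo divisor (if set) and offset from
 * baseclass to form the returned classid.
 */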
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
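/*
 * Illustrative userspace usage (syntax per tc-flow(8); not part of this
 * file).  Hash 1024 ways over the connection 5-tuple, reseeding every
 * 10 seconds, and map the fwmark directly to a class id:
 *
 *	tc filter add dev eth0 parent 1: handle 1 flow \
 *		hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 perturb 10
 *	tc filter add dev eth0 parent 1: handle 2 flow \
 *		map key mark baseclass 1:1
 */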
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}
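/*
 * Create or update a filter.  A replacement is built as a fresh
 * flow_filter, spliced into the list with list_replace_rcu(), and the
 * old instance is queued for destruction once readers are done, so
 * flow_classify() never sees a half-updated filter.
 */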
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
				extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}
static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}
static void flow_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}
static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");