// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

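/* Classifiers that set TCF_PROTO_OPS_DOIT_UNLOCKED in their ops advertise
 * that they can be created/changed/deleted without holding RTNL; the helper
 * below is what the rule update paths use to decide whether rtnl_lock()
 * must be taken for a given classifier kind.
 */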
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

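/* A tcf_proto starts life with one reference (set in tcf_proto_create());
 * tcf_proto_get()/tcf_proto_put() add and drop further references, and the
 * last put lands in tcf_proto_destroy() below, which also releases the
 * chain and module references taken at creation time.
 */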
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	tp->ops->walk(tp, &walker, rtnl_held);
	return !walker.nonempty;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

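/* Items on block->chain0.filter_chain_list carry head-change callbacks that
 * are invoked (see tcf_chain0_head_change()) whenever the first classifier
 * of chain 0 changes, typically so the owning qdisc can update its cached
 * filter list pointer.
 */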
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

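/* Actions (typically ones using "goto chain") take "action references" on a
 * chain via tcf_chain_get_by_act() below. Such references keep the chain
 * alive, but while only action references exist the chain is not reported
 * to user space (see tcf_chain_held_by_acts_only() above).
 */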
static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static struct rhashtable indr_setup_block_ht;

struct tc_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
	struct tcf_block *block;
};

struct tc_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	tc_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      tc_indr_setup_block_ht_params);
}

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	indr_dev->block = tc_dev_ingress_block(dev);
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   tc_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       tc_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
			tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
				  struct tc_indr_block_cb *indr_block_cb,
				  enum tc_block_command command)
{
	struct tc_block_offload bo = {
		.command	= command,
		.binder_type	= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.block		= indr_dev->block,
	};

	if (!indr_dev->block)
		return;

	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
			  &bo);
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	int err;

	indr_dev = tc_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
	return 0;

err_dev_put:
	tc_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);

void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	/* Send unbind message if required to free any block cbs. */
	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
	tc_indr_block_cb_del(indr_block_cb);
	tc_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	rtnl_lock();
	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);

static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum tc_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	struct tc_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.block		= block,
		.extack		= extack,
	};

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	bo.extack = extack;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		return -EOPNOTSUPP;
	}

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		return err;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}

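/* block->nooffloaddevcnt above counts bound devices that could not offload
 * the block (no ndo_setup_tc, or the bind command was not supported). Once a
 * block already has offloaded filters, binding another such device is
 * refused with -EOPNOTSUPP in tcf_block_offload_bind().
 */
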
860 tcf_chain0_head_change_cb_add(struct tcf_block
*block
,
861 struct tcf_block_ext_info
*ei
,
862 struct netlink_ext_ack
*extack
)
864 struct tcf_filter_chain_list_item
*item
;
865 struct tcf_chain
*chain0
;
867 item
= kmalloc(sizeof(*item
), GFP_KERNEL
);
869 NL_SET_ERR_MSG(extack
, "Memory allocation for head change callback item failed");
872 item
->chain_head_change
= ei
->chain_head_change
;
873 item
->chain_head_change_priv
= ei
->chain_head_change_priv
;
875 mutex_lock(&block
->lock
);
876 chain0
= block
->chain0
.chain
;
878 tcf_chain_hold(chain0
);
880 list_add(&item
->list
, &block
->chain0
.filter_chain_list
);
881 mutex_unlock(&block
->lock
);
884 struct tcf_proto
*tp_head
;
886 mutex_lock(&chain0
->filter_chain_lock
);
888 tp_head
= tcf_chain_dereference(chain0
->filter_chain
, chain0
);
890 tcf_chain_head_change_item(item
, tp_head
);
892 mutex_lock(&block
->lock
);
893 list_add(&item
->list
, &block
->chain0
.filter_chain_list
);
894 mutex_unlock(&block
->lock
);
896 mutex_unlock(&chain0
->filter_chain_lock
);
897 tcf_chain_put(chain0
);
904 tcf_chain0_head_change_cb_del(struct tcf_block
*block
,
905 struct tcf_block_ext_info
*ei
)
907 struct tcf_filter_chain_list_item
*item
;
909 mutex_lock(&block
->lock
);
910 list_for_each_entry(item
, &block
->chain0
.filter_chain_list
, list
) {
911 if ((!ei
->chain_head_change
&& !ei
->chain_head_change_priv
) ||
912 (item
->chain_head_change
== ei
->chain_head_change
&&
913 item
->chain_head_change_priv
== ei
->chain_head_change_priv
)) {
914 if (block
->chain0
.chain
)
915 tcf_chain_head_change_item(item
, NULL
);
916 list_del(&item
->list
);
917 mutex_unlock(&block
->lock
);
923 mutex_unlock(&block
->lock
);
928 spinlock_t idr_lock
; /* Protects idr */
932 static unsigned int tcf_net_id
;
934 static int tcf_block_insert(struct tcf_block
*block
, struct net
*net
,
935 struct netlink_ext_ack
*extack
)
937 struct tcf_net
*tn
= net_generic(net
, tcf_net_id
);
940 idr_preload(GFP_KERNEL
);
941 spin_lock(&tn
->idr_lock
);
942 err
= idr_alloc_u32(&tn
->idr
, block
, &block
->index
, block
->index
,
944 spin_unlock(&tn
->idr_lock
);
950 static void tcf_block_remove(struct tcf_block
*block
, struct net
*net
)
952 struct tcf_net
*tn
= net_generic(net
, tcf_net_id
);
954 spin_lock(&tn
->idr_lock
);
955 idr_remove(&tn
->idr
, block
->index
);
956 spin_unlock(&tn
->idr_lock
);
959 static struct tcf_block
*tcf_block_create(struct net
*net
, struct Qdisc
*q
,
961 struct netlink_ext_ack
*extack
)
963 struct tcf_block
*block
;
965 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
967 NL_SET_ERR_MSG(extack
, "Memory allocation for block failed");
968 return ERR_PTR(-ENOMEM
);
970 mutex_init(&block
->lock
);
971 INIT_LIST_HEAD(&block
->chain_list
);
972 INIT_LIST_HEAD(&block
->cb_list
);
973 INIT_LIST_HEAD(&block
->owner_list
);
974 INIT_LIST_HEAD(&block
->chain0
.filter_chain_list
);
976 refcount_set(&block
->refcnt
, 1);
978 block
->index
= block_index
;
980 /* Don't store q pointer for blocks which are shared */
981 if (!tcf_block_shared(block
))
986 static struct tcf_block
*tcf_block_lookup(struct net
*net
, u32 block_index
)
988 struct tcf_net
*tn
= net_generic(net
, tcf_net_id
);
990 return idr_find(&tn
->idr
, block_index
);
993 static struct tcf_block
*tcf_block_refcnt_get(struct net
*net
, u32 block_index
)
995 struct tcf_block
*block
;
998 block
= tcf_block_lookup(net
, block_index
);
999 if (block
&& !refcount_inc_not_zero(&block
->refcnt
))
1006 static struct tcf_chain
*
1007 __tcf_get_next_chain(struct tcf_block
*block
, struct tcf_chain
*chain
)
1009 mutex_lock(&block
->lock
);
1011 chain
= list_is_last(&chain
->list
, &block
->chain_list
) ?
1012 NULL
: list_next_entry(chain
, list
);
1014 chain
= list_first_entry_or_null(&block
->chain_list
,
1015 struct tcf_chain
, list
);
1017 /* skip all action-only chains */
1018 while (chain
&& tcf_chain_held_by_acts_only(chain
))
1019 chain
= list_is_last(&chain
->list
, &block
->chain_list
) ?
1020 NULL
: list_next_entry(chain
, list
);
1023 tcf_chain_hold(chain
);
1024 mutex_unlock(&block
->lock
);
1029 /* Function to be used by all clients that want to iterate over all chains on
1030 * block. It properly obtains block->lock and takes reference to chain before
1031 * returning it. Users of this function must be tolerant to concurrent chain
1032 * insertion/deletion or ensure that no concurrent chain modification is
1033 * possible. Note that all netlink dump callbacks cannot guarantee to provide
1034 * consistent dump because rtnl lock is released each time skb is filled with
1035 * data and sent to user-space.
1039 tcf_get_next_chain(struct tcf_block
*block
, struct tcf_chain
*chain
)
1041 struct tcf_chain
*chain_next
= __tcf_get_next_chain(block
, chain
);
1044 tcf_chain_put(chain
);
1048 EXPORT_SYMBOL(tcf_get_next_chain
);
1050 static struct tcf_proto
*
1051 __tcf_get_next_proto(struct tcf_chain
*chain
, struct tcf_proto
*tp
)
1056 mutex_lock(&chain
->filter_chain_lock
);
1059 tp
= tcf_chain_dereference(chain
->filter_chain
, chain
);
1060 } else if (tcf_proto_is_deleting(tp
)) {
1061 /* 'deleting' flag is set and chain->filter_chain_lock was
1062 * unlocked, which means next pointer could be invalid. Restart
1065 prio
= tp
->prio
+ 1;
1066 tp
= tcf_chain_dereference(chain
->filter_chain
, chain
);
1068 for (; tp
; tp
= tcf_chain_dereference(tp
->next
, chain
))
1069 if (!tp
->deleting
&& tp
->prio
>= prio
)
1072 tp
= tcf_chain_dereference(tp
->next
, chain
);
1078 mutex_unlock(&chain
->filter_chain_lock
);
1083 /* Function to be used by all clients that want to iterate over all tp's on
1084 * chain. Users of this function must be tolerant to concurrent tp
1085 * insertion/deletion or ensure that no concurrent chain modification is
1086 * possible. Note that all netlink dump callbacks cannot guarantee to provide
1087 * consistent dump because rtnl lock is released each time skb is filled with
1088 * data and sent to user-space.
1092 tcf_get_next_proto(struct tcf_chain
*chain
, struct tcf_proto
*tp
,
1095 struct tcf_proto
*tp_next
= __tcf_get_next_proto(chain
, tp
);
1098 tcf_proto_put(tp
, rtnl_held
, NULL
);
1102 EXPORT_SYMBOL(tcf_get_next_proto
);
1104 static void tcf_block_flush_all_chains(struct tcf_block
*block
, bool rtnl_held
)
1106 struct tcf_chain
*chain
;
1108 /* Last reference to block. At this point chains cannot be added or
1109 * removed concurrently.
1111 for (chain
= tcf_get_next_chain(block
, NULL
);
1113 chain
= tcf_get_next_chain(block
, chain
)) {
1114 tcf_chain_put_explicitly_created(chain
);
1115 tcf_chain_flush(chain
, rtnl_held
);
1119 /* Lookup Qdisc and increments its reference counter.
1120 * Set parent, if necessary.
1123 static int __tcf_qdisc_find(struct net
*net
, struct Qdisc
**q
,
1124 u32
*parent
, int ifindex
, bool rtnl_held
,
1125 struct netlink_ext_ack
*extack
)
1127 const struct Qdisc_class_ops
*cops
;
1128 struct net_device
*dev
;
1131 if (ifindex
== TCM_IFINDEX_MAGIC_BLOCK
)
1137 dev
= dev_get_by_index_rcu(net
, ifindex
);
1146 *parent
= (*q
)->handle
;
1148 *q
= qdisc_lookup_rcu(dev
, TC_H_MAJ(*parent
));
1150 NL_SET_ERR_MSG(extack
, "Parent Qdisc doesn't exists");
1156 *q
= qdisc_refcount_inc_nz(*q
);
1158 NL_SET_ERR_MSG(extack
, "Parent Qdisc doesn't exists");
1163 /* Is it classful? */
1164 cops
= (*q
)->ops
->cl_ops
;
1166 NL_SET_ERR_MSG(extack
, "Qdisc not classful");
1171 if (!cops
->tcf_block
) {
1172 NL_SET_ERR_MSG(extack
, "Class doesn't support blocks");
1178 /* At this point we know that qdisc is not noop_qdisc,
1179 * which means that qdisc holds a reference to net_device
1180 * and we hold a reference to qdisc, so it is safe to release
1192 qdisc_put_unlocked(*q
);
1198 static int __tcf_qdisc_cl_find(struct Qdisc
*q
, u32 parent
, unsigned long *cl
,
1199 int ifindex
, struct netlink_ext_ack
*extack
)
1201 if (ifindex
== TCM_IFINDEX_MAGIC_BLOCK
)
1204 /* Do we search for filter, attached to class? */
1205 if (TC_H_MIN(parent
)) {
1206 const struct Qdisc_class_ops
*cops
= q
->ops
->cl_ops
;
1208 *cl
= cops
->find(q
, parent
);
1210 NL_SET_ERR_MSG(extack
, "Specified class doesn't exist");
1218 static struct tcf_block
*__tcf_block_find(struct net
*net
, struct Qdisc
*q
,
1219 unsigned long cl
, int ifindex
,
1221 struct netlink_ext_ack
*extack
)
1223 struct tcf_block
*block
;
1225 if (ifindex
== TCM_IFINDEX_MAGIC_BLOCK
) {
1226 block
= tcf_block_refcnt_get(net
, block_index
);
1228 NL_SET_ERR_MSG(extack
, "Block of given index was not found");
1229 return ERR_PTR(-EINVAL
);
1232 const struct Qdisc_class_ops
*cops
= q
->ops
->cl_ops
;
1234 block
= cops
->tcf_block(q
, cl
, extack
);
1236 return ERR_PTR(-EINVAL
);
1238 if (tcf_block_shared(block
)) {
1239 NL_SET_ERR_MSG(extack
, "This filter block is shared. Please use the block index to manipulate the filters");
1240 return ERR_PTR(-EOPNOTSUPP
);
1243 /* Always take reference to block in order to support execution
1244 * of rules update path of cls API without rtnl lock. Caller
1245 * must release block when it is finished using it. 'if' block
1246 * of this conditional obtain reference to block by calling
1247 * tcf_block_refcnt_get().
1249 refcount_inc(&block
->refcnt
);
1255 static void __tcf_block_put(struct tcf_block
*block
, struct Qdisc
*q
,
1256 struct tcf_block_ext_info
*ei
, bool rtnl_held
)
1258 if (refcount_dec_and_mutex_lock(&block
->refcnt
, &block
->lock
)) {
1259 /* Flushing/putting all chains will cause the block to be
1260 * deallocated when last chain is freed. However, if chain_list
1261 * is empty, block has to be manually deallocated. After block
1262 * reference counter reached 0, it is no longer possible to
1263 * increment it or add new chains to block.
1265 bool free_block
= list_empty(&block
->chain_list
);
1267 mutex_unlock(&block
->lock
);
1268 if (tcf_block_shared(block
))
1269 tcf_block_remove(block
, block
->net
);
1272 tcf_block_offload_unbind(block
, q
, ei
);
1275 tcf_block_destroy(block
);
1277 tcf_block_flush_all_chains(block
, rtnl_held
);
1279 tcf_block_offload_unbind(block
, q
, ei
);
1283 static void tcf_block_refcnt_put(struct tcf_block
*block
, bool rtnl_held
)
1285 __tcf_block_put(block
, NULL
, NULL
, rtnl_held
);
1289 * Set q, parent, cl when appropriate.
1292 static struct tcf_block
*tcf_block_find(struct net
*net
, struct Qdisc
**q
,
1293 u32
*parent
, unsigned long *cl
,
1294 int ifindex
, u32 block_index
,
1295 struct netlink_ext_ack
*extack
)
1297 struct tcf_block
*block
;
1302 err
= __tcf_qdisc_find(net
, q
, parent
, ifindex
, true, extack
);
1306 err
= __tcf_qdisc_cl_find(*q
, *parent
, cl
, ifindex
, extack
);
1310 block
= __tcf_block_find(net
, *q
, *cl
, ifindex
, block_index
, extack
);
1311 if (IS_ERR(block
)) {
1312 err
= PTR_ERR(block
);
1323 return ERR_PTR(err
);
1326 static void tcf_block_release(struct Qdisc
*q
, struct tcf_block
*block
,
1329 if (!IS_ERR_OR_NULL(block
))
1330 tcf_block_refcnt_put(block
, rtnl_held
);
1336 qdisc_put_unlocked(q
);
1340 struct tcf_block_owner_item
{
1341 struct list_head list
;
1343 enum tcf_block_binder_type binder_type
;
1347 tcf_block_owner_netif_keep_dst(struct tcf_block
*block
,
1349 enum tcf_block_binder_type binder_type
)
1351 if (block
->keep_dst
&&
1352 binder_type
!= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
&&
1353 binder_type
!= TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS
)
1354 netif_keep_dst(qdisc_dev(q
));
1357 void tcf_block_netif_keep_dst(struct tcf_block
*block
)
1359 struct tcf_block_owner_item
*item
;
1361 block
->keep_dst
= true;
1362 list_for_each_entry(item
, &block
->owner_list
, list
)
1363 tcf_block_owner_netif_keep_dst(block
, item
->q
,
1366 EXPORT_SYMBOL(tcf_block_netif_keep_dst
);
1368 static int tcf_block_owner_add(struct tcf_block
*block
,
1370 enum tcf_block_binder_type binder_type
)
1372 struct tcf_block_owner_item
*item
;
1374 item
= kmalloc(sizeof(*item
), GFP_KERNEL
);
1378 item
->binder_type
= binder_type
;
1379 list_add(&item
->list
, &block
->owner_list
);
1383 static void tcf_block_owner_del(struct tcf_block
*block
,
1385 enum tcf_block_binder_type binder_type
)
1387 struct tcf_block_owner_item
*item
;
1389 list_for_each_entry(item
, &block
->owner_list
, list
) {
1390 if (item
->q
== q
&& item
->binder_type
== binder_type
) {
1391 list_del(&item
->list
);
1399 int tcf_block_get_ext(struct tcf_block
**p_block
, struct Qdisc
*q
,
1400 struct tcf_block_ext_info
*ei
,
1401 struct netlink_ext_ack
*extack
)
1403 struct net
*net
= qdisc_net(q
);
1404 struct tcf_block
*block
= NULL
;
1407 if (ei
->block_index
)
1408 /* block_index not 0 means the shared block is requested */
1409 block
= tcf_block_refcnt_get(net
, ei
->block_index
);
1412 block
= tcf_block_create(net
, q
, ei
->block_index
, extack
);
1414 return PTR_ERR(block
);
1415 if (tcf_block_shared(block
)) {
1416 err
= tcf_block_insert(block
, net
, extack
);
1418 goto err_block_insert
;
1422 err
= tcf_block_owner_add(block
, q
, ei
->binder_type
);
1424 goto err_block_owner_add
;
1426 tcf_block_owner_netif_keep_dst(block
, q
, ei
->binder_type
);
1428 err
= tcf_chain0_head_change_cb_add(block
, ei
, extack
);
1430 goto err_chain0_head_change_cb_add
;
1432 err
= tcf_block_offload_bind(block
, q
, ei
, extack
);
1434 goto err_block_offload_bind
;
1439 err_block_offload_bind
:
1440 tcf_chain0_head_change_cb_del(block
, ei
);
1441 err_chain0_head_change_cb_add
:
1442 tcf_block_owner_del(block
, q
, ei
->binder_type
);
1443 err_block_owner_add
:
1445 tcf_block_refcnt_put(block
, true);
1448 EXPORT_SYMBOL(tcf_block_get_ext
);
1450 static void tcf_chain_head_change_dflt(struct tcf_proto
*tp_head
, void *priv
)
1452 struct tcf_proto __rcu
**p_filter_chain
= priv
;
1454 rcu_assign_pointer(*p_filter_chain
, tp_head
);
1457 int tcf_block_get(struct tcf_block
**p_block
,
1458 struct tcf_proto __rcu
**p_filter_chain
, struct Qdisc
*q
,
1459 struct netlink_ext_ack
*extack
)
1461 struct tcf_block_ext_info ei
= {
1462 .chain_head_change
= tcf_chain_head_change_dflt
,
1463 .chain_head_change_priv
= p_filter_chain
,
1466 WARN_ON(!p_filter_chain
);
1467 return tcf_block_get_ext(p_block
, q
, &ei
, extack
);
1469 EXPORT_SYMBOL(tcf_block_get
);
1471 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1472 * actions should be all removed after flushing.
1474 void tcf_block_put_ext(struct tcf_block
*block
, struct Qdisc
*q
,
1475 struct tcf_block_ext_info
*ei
)
1479 tcf_chain0_head_change_cb_del(block
, ei
);
1480 tcf_block_owner_del(block
, q
, ei
->binder_type
);
1482 __tcf_block_put(block
, q
, ei
, true);
1484 EXPORT_SYMBOL(tcf_block_put_ext
);
1486 void tcf_block_put(struct tcf_block
*block
)
1488 struct tcf_block_ext_info ei
= {0, };
1492 tcf_block_put_ext(block
, block
->q
, &ei
);
1495 EXPORT_SYMBOL(tcf_block_put
);
1497 struct tcf_block_cb
{
1498 struct list_head list
;
1502 unsigned int refcnt
;
1505 void *tcf_block_cb_priv(struct tcf_block_cb
*block_cb
)
1507 return block_cb
->cb_priv
;
1509 EXPORT_SYMBOL(tcf_block_cb_priv
);
1511 struct tcf_block_cb
*tcf_block_cb_lookup(struct tcf_block
*block
,
1512 tc_setup_cb_t
*cb
, void *cb_ident
)
1513 { struct tcf_block_cb
*block_cb
;
1515 list_for_each_entry(block_cb
, &block
->cb_list
, list
)
1516 if (block_cb
->cb
== cb
&& block_cb
->cb_ident
== cb_ident
)
1520 EXPORT_SYMBOL(tcf_block_cb_lookup
);
1522 void tcf_block_cb_incref(struct tcf_block_cb
*block_cb
)
1526 EXPORT_SYMBOL(tcf_block_cb_incref
);
1528 unsigned int tcf_block_cb_decref(struct tcf_block_cb
*block_cb
)
1530 return --block_cb
->refcnt
;
1532 EXPORT_SYMBOL(tcf_block_cb_decref
);
1535 tcf_block_playback_offloads(struct tcf_block
*block
, tc_setup_cb_t
*cb
,
1536 void *cb_priv
, bool add
, bool offload_in_use
,
1537 struct netlink_ext_ack
*extack
)
1539 struct tcf_chain
*chain
, *chain_prev
;
1540 struct tcf_proto
*tp
, *tp_prev
;
1543 for (chain
= __tcf_get_next_chain(block
, NULL
);
1546 chain
= __tcf_get_next_chain(block
, chain
),
1547 tcf_chain_put(chain_prev
)) {
1548 for (tp
= __tcf_get_next_proto(chain
, NULL
); tp
;
1550 tp
= __tcf_get_next_proto(chain
, tp
),
1551 tcf_proto_put(tp_prev
, true, NULL
)) {
1552 if (tp
->ops
->reoffload
) {
1553 err
= tp
->ops
->reoffload(tp
, add
, cb
, cb_priv
,
1556 goto err_playback_remove
;
1557 } else if (add
&& offload_in_use
) {
1559 NL_SET_ERR_MSG(extack
, "Filter HW offload failed - classifier without re-offloading support");
1560 goto err_playback_remove
;
1567 err_playback_remove
:
1568 tcf_proto_put(tp
, true, NULL
);
1569 tcf_chain_put(chain
);
1570 tcf_block_playback_offloads(block
, cb
, cb_priv
, false, offload_in_use
,
1575 struct tcf_block_cb
*__tcf_block_cb_register(struct tcf_block
*block
,
1576 tc_setup_cb_t
*cb
, void *cb_ident
,
1578 struct netlink_ext_ack
*extack
)
1580 struct tcf_block_cb
*block_cb
;
1583 /* Replay any already present rules */
1584 err
= tcf_block_playback_offloads(block
, cb
, cb_priv
, true,
1585 tcf_block_offload_in_use(block
),
1588 return ERR_PTR(err
);
1590 block_cb
= kzalloc(sizeof(*block_cb
), GFP_KERNEL
);
1592 return ERR_PTR(-ENOMEM
);
1594 block_cb
->cb_ident
= cb_ident
;
1595 block_cb
->cb_priv
= cb_priv
;
1596 list_add(&block_cb
->list
, &block
->cb_list
);
1599 EXPORT_SYMBOL(__tcf_block_cb_register
);
1601 int tcf_block_cb_register(struct tcf_block
*block
,
1602 tc_setup_cb_t
*cb
, void *cb_ident
,
1603 void *cb_priv
, struct netlink_ext_ack
*extack
)
1605 struct tcf_block_cb
*block_cb
;
1607 block_cb
= __tcf_block_cb_register(block
, cb
, cb_ident
, cb_priv
,
1609 return PTR_ERR_OR_ZERO(block_cb
);
1611 EXPORT_SYMBOL(tcf_block_cb_register
);
1613 void __tcf_block_cb_unregister(struct tcf_block
*block
,
1614 struct tcf_block_cb
*block_cb
)
1616 tcf_block_playback_offloads(block
, block_cb
->cb
, block_cb
->cb_priv
,
1617 false, tcf_block_offload_in_use(block
),
1619 list_del(&block_cb
->list
);
1622 EXPORT_SYMBOL(__tcf_block_cb_unregister
);
1624 void tcf_block_cb_unregister(struct tcf_block
*block
,
1625 tc_setup_cb_t
*cb
, void *cb_ident
)
1627 struct tcf_block_cb
*block_cb
;
1629 block_cb
= tcf_block_cb_lookup(block
, cb
, cb_ident
);
1632 __tcf_block_cb_unregister(block
, block_cb
);
1634 EXPORT_SYMBOL(tcf_block_cb_unregister
);
1636 /* Main classifier routine: scans classifier chain attached
1637 * to this qdisc, (optionally) tests for protocol and asks
1638 * specific classifiers.
1640 int tcf_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
1641 struct tcf_result
*res
, bool compat_mode
)
1643 #ifdef CONFIG_NET_CLS_ACT
1644 const int max_reclassify_loop
= 4;
1645 const struct tcf_proto
*orig_tp
= tp
;
1646 const struct tcf_proto
*first_tp
;
1651 for (; tp
; tp
= rcu_dereference_bh(tp
->next
)) {
1652 __be16 protocol
= tc_skb_protocol(skb
);
1655 if (tp
->protocol
!= protocol
&&
1656 tp
->protocol
!= htons(ETH_P_ALL
))
1659 err
= tp
->classify(skb
, tp
, res
);
1660 #ifdef CONFIG_NET_CLS_ACT
1661 if (unlikely(err
== TC_ACT_RECLASSIFY
&& !compat_mode
)) {
1664 } else if (unlikely(TC_ACT_EXT_CMP(err
, TC_ACT_GOTO_CHAIN
))) {
1665 first_tp
= res
->goto_tp
;
1673 return TC_ACT_UNSPEC
; /* signal: continue lookup */
1674 #ifdef CONFIG_NET_CLS_ACT
1676 if (unlikely(limit
++ >= max_reclassify_loop
)) {
1677 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1678 tp
->chain
->block
->index
,
1680 ntohs(tp
->protocol
));
1688 EXPORT_SYMBOL(tcf_classify
);
1690 struct tcf_chain_info
{
1691 struct tcf_proto __rcu
**pprev
;
1692 struct tcf_proto __rcu
*next
;
1695 static struct tcf_proto
*tcf_chain_tp_prev(struct tcf_chain
*chain
,
1696 struct tcf_chain_info
*chain_info
)
1698 return tcf_chain_dereference(*chain_info
->pprev
, chain
);
1701 static int tcf_chain_tp_insert(struct tcf_chain
*chain
,
1702 struct tcf_chain_info
*chain_info
,
1703 struct tcf_proto
*tp
)
1705 if (chain
->flushing
)
1708 if (*chain_info
->pprev
== chain
->filter_chain
)
1709 tcf_chain0_head_change(chain
, tp
);
1711 RCU_INIT_POINTER(tp
->next
, tcf_chain_tp_prev(chain
, chain_info
));
1712 rcu_assign_pointer(*chain_info
->pprev
, tp
);
1717 static void tcf_chain_tp_remove(struct tcf_chain
*chain
,
1718 struct tcf_chain_info
*chain_info
,
1719 struct tcf_proto
*tp
)
1721 struct tcf_proto
*next
= tcf_chain_dereference(chain_info
->next
, chain
);
1723 tcf_proto_mark_delete(tp
);
1724 if (tp
== chain
->filter_chain
)
1725 tcf_chain0_head_change(chain
, next
);
1726 RCU_INIT_POINTER(*chain_info
->pprev
, next
);
1729 static struct tcf_proto
*tcf_chain_tp_find(struct tcf_chain
*chain
,
1730 struct tcf_chain_info
*chain_info
,
1731 u32 protocol
, u32 prio
,
1732 bool prio_allocate
);
1734 /* Try to insert new proto.
1735 * If proto with specified priority already exists, free new proto
1736 * and return existing one.
1739 static struct tcf_proto
*tcf_chain_tp_insert_unique(struct tcf_chain
*chain
,
1740 struct tcf_proto
*tp_new
,
1741 u32 protocol
, u32 prio
,
1744 struct tcf_chain_info chain_info
;
1745 struct tcf_proto
*tp
;
1748 mutex_lock(&chain
->filter_chain_lock
);
1750 tp
= tcf_chain_tp_find(chain
, &chain_info
,
1751 protocol
, prio
, false);
1753 err
= tcf_chain_tp_insert(chain
, &chain_info
, tp_new
);
1754 mutex_unlock(&chain
->filter_chain_lock
);
1757 tcf_proto_destroy(tp_new
, rtnl_held
, NULL
);
1760 tcf_proto_destroy(tp_new
, rtnl_held
, NULL
);
1761 tp_new
= ERR_PTR(err
);
1767 static void tcf_chain_tp_delete_empty(struct tcf_chain
*chain
,
1768 struct tcf_proto
*tp
, bool rtnl_held
,
1769 struct netlink_ext_ack
*extack
)
1771 struct tcf_chain_info chain_info
;
1772 struct tcf_proto
*tp_iter
;
1773 struct tcf_proto
**pprev
;
1774 struct tcf_proto
*next
;
1776 mutex_lock(&chain
->filter_chain_lock
);
1778 /* Atomically find and remove tp from chain. */
1779 for (pprev
= &chain
->filter_chain
;
1780 (tp_iter
= tcf_chain_dereference(*pprev
, chain
));
1781 pprev
= &tp_iter
->next
) {
1782 if (tp_iter
== tp
) {
1783 chain_info
.pprev
= pprev
;
1784 chain_info
.next
= tp_iter
->next
;
1785 WARN_ON(tp_iter
->deleting
);
1789 /* Verify that tp still exists and no new filters were inserted
1791 * Mark tp for deletion if it is empty.
1793 if (!tp_iter
|| !tcf_proto_check_delete(tp
, rtnl_held
)) {
1794 mutex_unlock(&chain
->filter_chain_lock
);
1798 next
= tcf_chain_dereference(chain_info
.next
, chain
);
1799 if (tp
== chain
->filter_chain
)
1800 tcf_chain0_head_change(chain
, next
);
1801 RCU_INIT_POINTER(*chain_info
.pprev
, next
);
1802 mutex_unlock(&chain
->filter_chain_lock
);
1804 tcf_proto_put(tp
, rtnl_held
, extack
);
1807 static struct tcf_proto
*tcf_chain_tp_find(struct tcf_chain
*chain
,
1808 struct tcf_chain_info
*chain_info
,
1809 u32 protocol
, u32 prio
,
1812 struct tcf_proto
**pprev
;
1813 struct tcf_proto
*tp
;
1815 /* Check the chain for existence of proto-tcf with this priority */
1816 for (pprev
= &chain
->filter_chain
;
1817 (tp
= tcf_chain_dereference(*pprev
, chain
));
1818 pprev
= &tp
->next
) {
1819 if (tp
->prio
>= prio
) {
1820 if (tp
->prio
== prio
) {
1821 if (prio_allocate
||
1822 (tp
->protocol
!= protocol
&& protocol
))
1823 return ERR_PTR(-EINVAL
);
1830 chain_info
->pprev
= pprev
;
1832 chain_info
->next
= tp
->next
;
1835 chain_info
->next
= NULL
;
1840 static int tcf_fill_node(struct net
*net
, struct sk_buff
*skb
,
1841 struct tcf_proto
*tp
, struct tcf_block
*block
,
1842 struct Qdisc
*q
, u32 parent
, void *fh
,
1843 u32 portid
, u32 seq
, u16 flags
, int event
,
1847 struct nlmsghdr
*nlh
;
1848 unsigned char *b
= skb_tail_pointer(skb
);
1850 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*tcm
), flags
);
1852 goto out_nlmsg_trim
;
1853 tcm
= nlmsg_data(nlh
);
1854 tcm
->tcm_family
= AF_UNSPEC
;
1858 tcm
->tcm_ifindex
= qdisc_dev(q
)->ifindex
;
1859 tcm
->tcm_parent
= parent
;
1861 tcm
->tcm_ifindex
= TCM_IFINDEX_MAGIC_BLOCK
;
1862 tcm
->tcm_block_index
= block
->index
;
1864 tcm
->tcm_info
= TC_H_MAKE(tp
->prio
, tp
->protocol
);
1865 if (nla_put_string(skb
, TCA_KIND
, tp
->ops
->kind
))
1866 goto nla_put_failure
;
1867 if (nla_put_u32(skb
, TCA_CHAIN
, tp
->chain
->index
))
1868 goto nla_put_failure
;
1870 tcm
->tcm_handle
= 0;
1872 if (tp
->ops
->dump
&&
1873 tp
->ops
->dump(net
, tp
, fh
, skb
, tcm
, rtnl_held
) < 0)
1874 goto nla_put_failure
;
1876 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - b
;
1885 static int tfilter_notify(struct net
*net
, struct sk_buff
*oskb
,
1886 struct nlmsghdr
*n
, struct tcf_proto
*tp
,
1887 struct tcf_block
*block
, struct Qdisc
*q
,
1888 u32 parent
, void *fh
, int event
, bool unicast
,
1891 struct sk_buff
*skb
;
1892 u32 portid
= oskb
? NETLINK_CB(oskb
).portid
: 0;
1895 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
1899 if (tcf_fill_node(net
, skb
, tp
, block
, q
, parent
, fh
, portid
,
1900 n
->nlmsg_seq
, n
->nlmsg_flags
, event
,
1907 err
= netlink_unicast(net
->rtnl
, skb
, portid
, MSG_DONTWAIT
);
1909 err
= rtnetlink_send(skb
, net
, portid
, RTNLGRP_TC
,
1910 n
->nlmsg_flags
& NLM_F_ECHO
);
1917 static int tfilter_del_notify(struct net
*net
, struct sk_buff
*oskb
,
1918 struct nlmsghdr
*n
, struct tcf_proto
*tp
,
1919 struct tcf_block
*block
, struct Qdisc
*q
,
1920 u32 parent
, void *fh
, bool unicast
, bool *last
,
1921 bool rtnl_held
, struct netlink_ext_ack
*extack
)
1923 struct sk_buff
*skb
;
1924 u32 portid
= oskb
? NETLINK_CB(oskb
).portid
: 0;
1927 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
1931 if (tcf_fill_node(net
, skb
, tp
, block
, q
, parent
, fh
, portid
,
1932 n
->nlmsg_seq
, n
->nlmsg_flags
, RTM_DELTFILTER
,
1934 NL_SET_ERR_MSG(extack
, "Failed to build del event notification");
1939 err
= tp
->ops
->delete(tp
, fh
, last
, rtnl_held
, extack
);
1946 err
= netlink_unicast(net
->rtnl
, skb
, portid
, MSG_DONTWAIT
);
1948 err
= rtnetlink_send(skb
, net
, portid
, RTNLGRP_TC
,
1949 n
->nlmsg_flags
& NLM_F_ECHO
);
1951 NL_SET_ERR_MSG(extack
, "Failed to send filter delete notification");
1958 static void tfilter_notify_chain(struct net
*net
, struct sk_buff
*oskb
,
1959 struct tcf_block
*block
, struct Qdisc
*q
,
1960 u32 parent
, struct nlmsghdr
*n
,
1961 struct tcf_chain
*chain
, int event
,
1964 struct tcf_proto
*tp
;
1966 for (tp
= tcf_get_next_proto(chain
, NULL
, rtnl_held
);
1967 tp
; tp
= tcf_get_next_proto(chain
, tp
, rtnl_held
))
1968 tfilter_notify(net
, oskb
, n
, tp
, block
,
1969 q
, parent
, NULL
, event
, false, rtnl_held
);
1972 static void tfilter_put(struct tcf_proto
*tp
, void *fh
)
1974 if (tp
->ops
->put
&& fh
)
1975 tp
->ops
->put(tp
, fh
);
1978 static int tc_new_tfilter(struct sk_buff
*skb
, struct nlmsghdr
*n
,
1979 struct netlink_ext_ack
*extack
)
1981 struct net
*net
= sock_net(skb
->sk
);
1982 struct nlattr
*tca
[TCA_MAX
+ 1];
1989 struct Qdisc
*q
= NULL
;
1990 struct tcf_chain_info chain_info
;
1991 struct tcf_chain
*chain
= NULL
;
1992 struct tcf_block
*block
;
1993 struct tcf_proto
*tp
;
1998 bool rtnl_held
= false;
2000 if (!netlink_ns_capable(skb
, net
->user_ns
, CAP_NET_ADMIN
))
2006 err
= nlmsg_parse_deprecated(n
, sizeof(*t
), tca
, TCA_MAX
,
2007 rtm_tca_policy
, extack
);
2012 protocol
= TC_H_MIN(t
->tcm_info
);
2013 prio
= TC_H_MAJ(t
->tcm_info
);
2014 prio_allocate
= false;
2015 parent
= t
->tcm_parent
;
2021 /* If no priority is provided by the user,
2024 if (n
->nlmsg_flags
& NLM_F_CREATE
) {
2025 prio
= TC_H_MAKE(0x80000000U
, 0U);
2026 prio_allocate
= true;
2028 NL_SET_ERR_MSG(extack
, "Invalid filter command with priority of zero");
2033 /* Find head of filter chain. */
2035 err
= __tcf_qdisc_find(net
, &q
, &parent
, t
->tcm_ifindex
, false, extack
);
2039 /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
2040 * block is shared (no qdisc found), qdisc is not unlocked, classifier
2041 * type is not specified, classifier is not unlocked.
2044 (q
&& !(q
->ops
->cl_ops
->flags
& QDISC_CLASS_OPS_DOIT_UNLOCKED
)) ||
2045 !tca
[TCA_KIND
] || !tcf_proto_is_unlocked(nla_data(tca
[TCA_KIND
]))) {
2050 err
= __tcf_qdisc_cl_find(q
, parent
, &cl
, t
->tcm_ifindex
, extack
);
2054 block
= __tcf_block_find(net
, q
, cl
, t
->tcm_ifindex
, t
->tcm_block_index
,
2056 if (IS_ERR(block
)) {
2057 err
= PTR_ERR(block
);
2061 chain_index
= tca
[TCA_CHAIN
] ? nla_get_u32(tca
[TCA_CHAIN
]) : 0;
2062 if (chain_index
> TC_ACT_EXT_VAL_MASK
) {
2063 NL_SET_ERR_MSG(extack
, "Specified chain index exceeds upper limit");
2067 chain
= tcf_chain_get(block
, chain_index
, true);
2069 NL_SET_ERR_MSG(extack
, "Cannot create specified filter chain");
2074 mutex_lock(&chain
->filter_chain_lock
);
2075 tp
= tcf_chain_tp_find(chain
, &chain_info
, protocol
,
2076 prio
, prio_allocate
);
2078 NL_SET_ERR_MSG(extack
, "Filter with specified priority/protocol not found");
2084 struct tcf_proto
*tp_new
= NULL
;
2086 if (chain
->flushing
) {
2091 /* Proto-tcf does not exist, create new one */
2093 if (tca
[TCA_KIND
] == NULL
|| !protocol
) {
2094 NL_SET_ERR_MSG(extack
, "Filter kind and protocol must be specified");
2099 if (!(n
->nlmsg_flags
& NLM_F_CREATE
)) {
2100 NL_SET_ERR_MSG(extack
, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2106 prio
= tcf_auto_prio(tcf_chain_tp_prev(chain
,
2109 mutex_unlock(&chain
->filter_chain_lock
);
2110 tp_new
= tcf_proto_create(nla_data(tca
[TCA_KIND
]),
2111 protocol
, prio
, chain
, rtnl_held
,
2113 if (IS_ERR(tp_new
)) {
2114 err
= PTR_ERR(tp_new
);
2119 tp
= tcf_chain_tp_insert_unique(chain
, tp_new
, protocol
, prio
,
2126 mutex_unlock(&chain
->filter_chain_lock
);
2129 if (tca
[TCA_KIND
] && nla_strcmp(tca
[TCA_KIND
], tp
->ops
->kind
)) {
2130 NL_SET_ERR_MSG(extack
, "Specified filter kind does not match existing one");
2135 fh
= tp
->ops
->get(tp
, t
->tcm_handle
);
2138 if (!(n
->nlmsg_flags
& NLM_F_CREATE
)) {
2139 NL_SET_ERR_MSG(extack
, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2143 } else if (n
->nlmsg_flags
& NLM_F_EXCL
) {
2144 tfilter_put(tp
, fh
);
2145 NL_SET_ERR_MSG(extack
, "Filter already exists");
2150 if (chain
->tmplt_ops
&& chain
->tmplt_ops
!= tp
->ops
) {
2151 NL_SET_ERR_MSG(extack
, "Chain template is set to a different filter kind");
2156 err
= tp
->ops
->change(net
, skb
, tp
, cl
, t
->tcm_handle
, tca
, &fh
,
2157 n
->nlmsg_flags
& NLM_F_CREATE
? TCA_ACT_NOREPLACE
: TCA_ACT_REPLACE
,
2160 tfilter_notify(net
, skb
, n
, tp
, block
, q
, parent
, fh
,
2161 RTM_NEWTFILTER
, false, rtnl_held
);
2162 tfilter_put(tp
, fh
);
2166 if (err
&& tp_created
)
2167 tcf_chain_tp_delete_empty(chain
, tp
, rtnl_held
, NULL
);
2170 if (tp
&& !IS_ERR(tp
))
2171 tcf_proto_put(tp
, rtnl_held
, NULL
);
2173 tcf_chain_put(chain
);
2175 tcf_block_release(q
, block
, rtnl_held
);
2180 if (err
== -EAGAIN
) {
2181 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2185 /* Replay the request. */
2191 mutex_unlock(&chain
->filter_chain_lock
);
2195 static int tc_del_tfilter(struct sk_buff
*skb
, struct nlmsghdr
*n
,
2196 struct netlink_ext_ack
*extack
)
2198 struct net
*net
= sock_net(skb
->sk
);
2199 struct nlattr
*tca
[TCA_MAX
+ 1];
2205 struct Qdisc
*q
= NULL
;
2206 struct tcf_chain_info chain_info
;
2207 struct tcf_chain
*chain
= NULL
;
2208 struct tcf_block
*block
= NULL
;
2209 struct tcf_proto
*tp
= NULL
;
2210 unsigned long cl
= 0;
2213 bool rtnl_held
= false;
2215 if (!netlink_ns_capable(skb
, net
->user_ns
, CAP_NET_ADMIN
))
2218 err
= nlmsg_parse_deprecated(n
, sizeof(*t
), tca
, TCA_MAX
,
2219 rtm_tca_policy
, extack
);
2224 protocol
= TC_H_MIN(t
->tcm_info
);
2225 prio
= TC_H_MAJ(t
->tcm_info
);
2226 parent
= t
->tcm_parent
;
2228 if (prio
== 0 && (protocol
|| t
->tcm_handle
|| tca
[TCA_KIND
])) {
2229 NL_SET_ERR_MSG(extack
, "Cannot flush filters with protocol, handle or kind set");
2233 /* Find head of filter chain. */
2235 err
= __tcf_qdisc_find(net
, &q
, &parent
, t
->tcm_ifindex
, false, extack
);
2239 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
2240 * found), qdisc is not unlocked, classifier type is not specified,
2241 * classifier is not unlocked.
2244 (q
&& !(q
->ops
->cl_ops
->flags
& QDISC_CLASS_OPS_DOIT_UNLOCKED
)) ||
2245 !tca
[TCA_KIND
] || !tcf_proto_is_unlocked(nla_data(tca
[TCA_KIND
]))) {
2250 err
= __tcf_qdisc_cl_find(q
, parent
, &cl
, t
->tcm_ifindex
, extack
);
2254 block
= __tcf_block_find(net
, q
, cl
, t
->tcm_ifindex
, t
->tcm_block_index
,
2256 if (IS_ERR(block
)) {
2257 err
= PTR_ERR(block
);
2261 chain_index
= tca
[TCA_CHAIN
] ? nla_get_u32(tca
[TCA_CHAIN
]) : 0;
2262 if (chain_index
> TC_ACT_EXT_VAL_MASK
) {
2263 NL_SET_ERR_MSG(extack
, "Specified chain index exceeds upper limit");
2267 chain
= tcf_chain_get(block
, chain_index
, false);
2269 /* User requested flush on non-existent chain. Nothing to do,
2270 * so just return success.
2276 NL_SET_ERR_MSG(extack
, "Cannot find specified filter chain");
2282 tfilter_notify_chain(net
, skb
, block
, q
, parent
, n
,
2283 chain
, RTM_DELTFILTER
, rtnl_held
);
2284 tcf_chain_flush(chain
, rtnl_held
);
2289 mutex_lock(&chain
->filter_chain_lock
);
2290 tp
= tcf_chain_tp_find(chain
, &chain_info
, protocol
,
2292 if (!tp
|| IS_ERR(tp
)) {
2293 NL_SET_ERR_MSG(extack
, "Filter with specified priority/protocol not found");
2294 err
= tp
? PTR_ERR(tp
) : -ENOENT
;
2296 } else if (tca
[TCA_KIND
] && nla_strcmp(tca
[TCA_KIND
], tp
->ops
->kind
)) {
2297 NL_SET_ERR_MSG(extack
, "Specified filter kind does not match existing one");
2300 } else if (t
->tcm_handle
== 0) {
2301 tcf_chain_tp_remove(chain
, &chain_info
, tp
);
2302 mutex_unlock(&chain
->filter_chain_lock
);
2304 tcf_proto_put(tp
, rtnl_held
, NULL
);
2305 tfilter_notify(net
, skb
, n
, tp
, block
, q
, parent
, fh
,
2306 RTM_DELTFILTER
, false, rtnl_held
);
2310 mutex_unlock(&chain
->filter_chain_lock
);
2312 fh
= tp
->ops
->get(tp
, t
->tcm_handle
);
2315 NL_SET_ERR_MSG(extack
, "Specified filter handle not found");
2320 err
= tfilter_del_notify(net
, skb
, n
, tp
, block
,
2321 q
, parent
, fh
, false, &last
,
2327 tcf_chain_tp_delete_empty(chain
, tp
, rtnl_held
, extack
);
2332 if (tp
&& !IS_ERR(tp
))
2333 tcf_proto_put(tp
, rtnl_held
, NULL
);
2334 tcf_chain_put(chain
);
2336 tcf_block_release(q
, block
, rtnl_held
);
2344 mutex_unlock(&chain
->filter_chain_lock
);
2348 static int tc_get_tfilter(struct sk_buff
*skb
, struct nlmsghdr
*n
,
2349 struct netlink_ext_ack
*extack
)
2351 struct net
*net
= sock_net(skb
->sk
);
2352 struct nlattr
*tca
[TCA_MAX
+ 1];
2358 struct Qdisc
*q
= NULL
;
2359 struct tcf_chain_info chain_info
;
2360 struct tcf_chain
*chain
= NULL
;
2361 struct tcf_block
*block
= NULL
;
2362 struct tcf_proto
*tp
= NULL
;
2363 unsigned long cl
= 0;
2366 bool rtnl_held
= false;
2368 err
= nlmsg_parse_deprecated(n
, sizeof(*t
), tca
, TCA_MAX
,
2369 rtm_tca_policy
, extack
);
2374 protocol
= TC_H_MIN(t
->tcm_info
);
2375 prio
= TC_H_MAJ(t
->tcm_info
);
2376 parent
= t
->tcm_parent
;
2379 NL_SET_ERR_MSG(extack
, "Invalid filter command with priority of zero");
2383 /* Find head of filter chain. */
2385 err
= __tcf_qdisc_find(net
, &q
, &parent
, t
->tcm_ifindex
, false, extack
);
2389 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
2390 * unlocked, classifier type is not specified, classifier is not
2393 if ((q
&& !(q
->ops
->cl_ops
->flags
& QDISC_CLASS_OPS_DOIT_UNLOCKED
)) ||
2394 !tca
[TCA_KIND
] || !tcf_proto_is_unlocked(nla_data(tca
[TCA_KIND
]))) {
2399 err
= __tcf_qdisc_cl_find(q
, parent
, &cl
, t
->tcm_ifindex
, extack
);
2403 block
= __tcf_block_find(net
, q
, cl
, t
->tcm_ifindex
, t
->tcm_block_index
,
2405 if (IS_ERR(block
)) {
2406 err
= PTR_ERR(block
);
2410 chain_index
= tca
[TCA_CHAIN
] ? nla_get_u32(tca
[TCA_CHAIN
]) : 0;
2411 if (chain_index
> TC_ACT_EXT_VAL_MASK
) {
2412 NL_SET_ERR_MSG(extack
, "Specified chain index exceeds upper limit");
2416 chain
= tcf_chain_get(block
, chain_index
, false);
2418 NL_SET_ERR_MSG(extack
, "Cannot find specified filter chain");
2423 mutex_lock(&chain
->filter_chain_lock
);
2424 tp
= tcf_chain_tp_find(chain
, &chain_info
, protocol
,
2426 mutex_unlock(&chain
->filter_chain_lock
);
2427 if (!tp
|| IS_ERR(tp
)) {
2428 NL_SET_ERR_MSG(extack
, "Filter with specified priority/protocol not found");
2429 err
= tp
? PTR_ERR(tp
) : -ENOENT
;
2431 } else if (tca
[TCA_KIND
] && nla_strcmp(tca
[TCA_KIND
], tp
->ops
->kind
)) {
2432 NL_SET_ERR_MSG(extack
, "Specified filter kind does not match existing one");
2437 fh
= tp
->ops
->get(tp
, t
->tcm_handle
);
2440 NL_SET_ERR_MSG(extack
, "Specified filter handle not found");
2443 err
= tfilter_notify(net
, skb
, n
, tp
, block
, q
, parent
,
2444 fh
, RTM_NEWTFILTER
, true, rtnl_held
);
2446 NL_SET_ERR_MSG(extack
, "Failed to send filter notify message");
2449 tfilter_put(tp
, fh
);
2452 if (tp
&& !IS_ERR(tp
))
2453 tcf_proto_put(tp
, rtnl_held
, NULL
);
2454 tcf_chain_put(chain
);
2456 tcf_block_release(q
, block
, rtnl_held
);
2464 struct tcf_dump_args
{
2465 struct tcf_walker w
;
2466 struct sk_buff
*skb
;
2467 struct netlink_callback
*cb
;
2468 struct tcf_block
*block
;
2473 static int tcf_node_dump(struct tcf_proto
*tp
, void *n
, struct tcf_walker
*arg
)
2475 struct tcf_dump_args
*a
= (void *)arg
;
2476 struct net
*net
= sock_net(a
->skb
->sk
);
2478 return tcf_fill_node(net
, a
->skb
, tp
, a
->block
, a
->q
, a
->parent
,
2479 n
, NETLINK_CB(a
->cb
->skb
).portid
,
2480 a
->cb
->nlh
->nlmsg_seq
, NLM_F_MULTI
,
2481 RTM_NEWTFILTER
, true);
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

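/* Filter dumps can be addressed either at a device/qdisc (tcm_ifindex names
 * a netdevice) or directly at a shared block (tcm_ifindex is
 * TCM_IFINDEX_MAGIC_BLOCK and tcm_block_index carries the block number), as
 * handled above. Illustrative iproute2 equivalents (interface name and block
 * number are examples only):
 *
 *   tc filter show dev eth0 ingress
 *   tc filter show block 22
 */
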
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

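/* The chain message built by tc_chain_fill_node() is a plain tcmsg header
 * followed by a TCA_CHAIN u32 attribute and, when the chain carries a
 * template, a TCA_KIND string plus whatever attributes the classifier's
 * tmplt_dump() adds. A rough sketch of the resulting layout:
 *
 *   struct tcmsg          family, ifindex or magic block, parent, handle == 0
 *   TCA_CHAIN (u32)       chain index
 *   TCA_KIND  (string)    template classifier kind, e.g. "flower"
 *   ...                   classifier-specific template attributes
 */
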
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

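/* Chain templates let the user declare up front what the filters on a chain
 * will look like, so that classifiers (and offloading drivers) can prepare
 * matching resources before the first filter arrives. Only classifiers that
 * implement tmplt_create(), tmplt_destroy() and tmplt_dump() support this,
 * as checked in tc_chain_tmplt_add() below. An illustrative iproute2
 * invocation (device and prefix are examples only):
 *
 *   tc chain add dev eth0 ingress protocol ip flower dst_ip 192.0.2.0/24
 */
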
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

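/* The three chain commands map onto the switch statement above: RTM_NEWCHAIN
 * creates (or takes over) a chain and optionally installs a template,
 * RTM_DELCHAIN first flushes every filter on the chain and then drops the
 * explicit reference, and RTM_GETCHAIN replies with a single chain
 * notification. Illustrative iproute2 equivalents (device and chain index
 * are examples only):
 *
 *   tc chain add dev eth0 ingress chain 100
 *   tc chain del dev eth0 ingress chain 100
 *   tc chain get dev eth0 ingress chain 100
 */
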
/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

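/* Classifiers record the attribute ids of their own TCA_*_ACT and
 * TCA_*_POLICE attributes in exts->action / exts->police (typically via
 * tcf_exts_init()) and then call tcf_exts_validate() from their ->change()
 * hook to turn those attributes into tc_action instances. A typical,
 * illustrative call site (the exts variable name is an example only):
 *
 *   err = tcf_exts_validate(net, tp, tb, est, &exts, ovr, true, extack);
 */
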
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

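/* tc_setup_cb_call() is how classifiers hand an offload request to every
 * driver callback registered on a block: each callback gets the same
 * type/type_data pair, an error aborts the walk only when err_stop is set,
 * and a non-negative return value is the number of callbacks that accepted
 * the request. A classifier would typically invoke it along these lines
 * (cls_flower-style; the offload descriptor variable is illustrative):
 *
 *   err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
 */
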
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	const struct tc_action *act;
	int i, j, k;

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			entry->tunnel = tcf_tunnel_info(act);
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.psample_group =
				tcf_sample_psample_group(act);
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else {
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}
	return 0;

err_out:
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tc_setup_flow_action);

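/* tc_setup_flow_action() flattens the software actions attached to a filter
 * into the driver-facing struct flow_action. Most actions map 1:1 onto a
 * single flow_action_entry; pedit is the exception and expands into one
 * MANGLE/ADD entry per pedit key, which is why tcf_exts_num_actions() below
 * counts tcf_pedit_nkeys() for pedit actions instead of one. Callers are
 * expected to size flow_action->entries from that count before translating.
 */
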
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	err = rhashtable_init(&indr_setup_block_ht,
			      &tc_indr_setup_block_ht_params);
	if (err)
		goto err_rhash_setup_block_ht;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_rhash_setup_block_ht:
	unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);