/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
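
/* Example: a minimal sketch of how a classifier-capable qdisc might acquire
 * and release its filter block with tcf_block_get()/tcf_block_put().  The
 * "example_sched_data" type and callback names are hypothetical, loosely
 * modelled on sch_ingress; they are not part of this header.
 */
#if 0	/* illustrative only */
struct example_sched_data {
	struct tcf_block	*block;
	struct tcf_proto __rcu	*filter_list;
};

static int example_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);

	/* Ties the block (and thus all attached chains/filters) to this
	 * qdisc instance; extack carries errors back to the netlink caller.
	 */
	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif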
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
#else

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}
static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}
static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}
static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return old_cl;
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
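
/* Example: a hedged sketch of how a classifier's ->change() and ->delete()
 * paths typically use tcf_bind_filter()/tcf_unbind_filter() when a classid
 * was supplied.  "struct example_filter" is hypothetical.
 */
#if 0	/* illustrative only */
struct example_filter {
	struct tcf_result	res;
	struct tcf_exts		exts;
};

static void example_set_classid(struct tcf_proto *tp, struct example_filter *f,
				unsigned long base, u32 classid)
{
	f->res.classid = classid;
	/* Resolve classid against the owning qdisc's classes and remember
	 * the binding in f->res.class (no-op for shared blocks).
	 */
	tcf_bind_filter(tp, &f->res, base);
}

static void example_del_filter(struct tcf_proto *tp, struct example_filter *f)
{
	/* Drop the class binding before the filter is freed. */
	tcf_unbind_filter(tp, &f->res);
}
#endif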
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
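
/* Example: the deferred-vs-synchronous teardown pattern that the comment
 * above tcf_exts_get_net() describes, as used by classifiers such as
 * cls_flower.  The "example_*" filter type and helpers are hypothetical and
 * assume the filter embeds a struct rcu_work rwork.
 */
#if 0	/* illustrative only */
static void example_destroy_filter_work(struct work_struct *work)
{
	/* ...free the filter here, then tcf_exts_put_net(&f->exts)... */
}

static void example_delete_filter(struct example_filter *f)
{
	/* If the netns is still alive, defer freeing to a workqueue; if
	 * cleanup_net() is already running, tear down synchronously to avoid
	 * racing with tc_action_net_exit().
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, example_destroy_filter_work);
	else
		example_destroy_filter_sync(f);
}
#endif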
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
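
/* Example: a hedged sketch of a classifier's ->classify() handing control to
 * tcf_exts_exec() once its own match succeeded.  "example_filter" and
 * example_match() are hypothetical stand-ins for a real classifier's state.
 */
#if 0	/* illustrative only */
static int example_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct example_filter *f = rcu_dereference_bh(tp->root);

	if (!f || !example_match(skb, f))
		return -1;	/* unmatched: let the next tcf_proto try */

	*res = f->res;
	/* Run the attached actions; the TC_ACT_* verdict is returned to the
	 * qdisc via tcf_classify().
	 */
	return tcf_exts_exec(skb, &f->exts, res);
}
#endif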
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
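
/* Example: a hedged sketch of ematch tree usage inside a classifier match
 * loop, in the style of cls_basic.  "example_filter" with an "ematches"
 * member is hypothetical.
 */
#if 0	/* illustrative only */
static bool example_filter_matches(struct sk_buff *skb,
				   struct example_filter *f)
{
	struct tcf_pkt_info info = {};

	/* Evaluates the whole tree, honouring AND/OR/INVERT relations and
	 * stopping as soon as the result is decided.
	 */
	return tcf_em_tree_match(skb, &f->ematches, &info);
}
#endif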
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
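
/* Example: combining tcf_get_base_ptr() and tcf_valid_offset() to read one
 * byte at a layer-relative offset without running past the linear skb data.
 * A hypothetical helper, not part of this header.
 */
#if 0	/* illustrative only */
static bool example_match_byte(struct sk_buff *skb, int layer, int off, u8 val)
{
	unsigned char *base = tcf_get_base_ptr(skb, layer);
	unsigned char *ptr = base + off;

	if (!base || !tcf_valid_offset(skb, ptr, 1))
		return false;	/* header missing or offset out of bounds */

	return *ptr == val;
}
#endif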
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
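
/* Example: typical split between control path and fast path for the indev
 * helpers above: resolve the interface name once at configuration time with
 * tcf_change_indev(), then compare the cached ifindex per packet with
 * tcf_match_indev().  The filter structure and TCA_EXAMPLE_INDEV attribute
 * are hypothetical.
 */
#if 0	/* illustrative only */
static int example_parse_indev(struct net *net, struct nlattr **tb,
			       struct example_filter *f,
			       struct netlink_ext_ack *extack)
{
	int ifindex;

	if (!tb[TCA_EXAMPLE_INDEV])
		return 0;
	ifindex = tcf_change_indev(net, tb[TCA_EXAMPLE_INDEV], extack);
	if (ifindex < 0)
		return ifindex;
	f->ifindex = ifindex;
	return 0;
}

/* ->classify() side: if (!tcf_match_indev(skb, f->ifindex)) treat as miss. */
#endif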
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}
static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;

	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}

	return true;
}
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
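
/* Example: how the flag helpers above are typically combined in a
 * classifier's ->change() path; a hedged sketch, with "fnew" standing in for
 * the new filter being installed.
 */
#if 0	/* illustrative only */
	if (!tc_flags_valid(fnew->flags))	/* e.g. SKIP_HW | SKIP_SW */
		return -EINVAL;

	/* ...attempt hardware offload unless tc_skip_hw(fnew->flags)... */

	if (tc_skip_sw(fnew->flags) && !tc_in_hw(fnew->flags))
		return -EINVAL;	/* hw-only filter that no device accepted */
#endif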
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
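
/* Example: a classifier preparing and issuing an offload request; the common
 * fields are filled by tc_cls_common_offload_init() and the request is fanned
 * out to the block's registered callbacks via tc_setup_cb_call().  A hedged
 * sketch in the style of cls_flower's hardware-replace path.
 */
#if 0	/* illustrative only */
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	if (err < 0 && skip_sw)
		return err;	/* hw-only filter could not be installed */
#endif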
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};
struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};
struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};
struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};
struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};
struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};
struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};