// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *	Refer to:
 *	draft-ietf-forces-interfelfb-03
 *	and
 *	netdev01 paper:
 *	"Distributing Linux Traffic Control Classifier-Action
 *	Subsystem"
 *	Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * copyright Jamal Hadi Salim (2015)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>
static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;
static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
        [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
        [TCA_IFE_DMAC]  = { .len = ETH_ALEN},
        [TCA_IFE_SMAC]  = { .len = ETH_ALEN},
        [TCA_IFE_TYPE]  = { .type = NLA_U16},
};
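
/* TCA_IFE_PARMS carries struct tc_ife (index, action verdict and the
 * IFE_ENCODE/IFE_DECODE flag). On encode, TCA_IFE_DMAC/TCA_IFE_SMAC
 * optionally override the outer ethernet addresses and TCA_IFE_TYPE the
 * outer ethertype (default ETH_P_IFE). The per-metadatum allow/use list
 * arrives nested under TCA_IFE_METALST and is validated separately in
 * load_metalist()/populate_metalist() below, so it has no entry here.
 */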
int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u16 edata = 0;

        if (mi->metaval)
                edata = *(u16 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
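
/* Unlike the u32 encoder below, the u16 encoder does no byte swapping:
 * the caller is expected to hand in the value already in network order.
 * A zero value means "nothing to encode" and consumes no space in the
 * IFE header.
 */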
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+V == 2+2+4 */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);
int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);
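
/* The check_*() helpers report how much space a metadatum will occupy
 * on the wire: a 2 byte type, a 2 byte length and the value padded to a
 * 32 bit boundary, hence 8 bytes for both the u32 and the padded u16
 * case. ife_get_sz() sums these results to size the IFE header before
 * encoding.
 */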
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u32 edata = metaval;

        if (mi->metaval)
                edata = *(u32 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htonl(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
void ife_release_meta_gen(struct tcf_meta_info *mi)
{
        kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);
int ife_validate_meta_u32(void *val, int len)
{
        if (len == sizeof(u32))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
int ife_validate_meta_u16(void *val, int len)
{
        /* length will not include padding */
        if (len == sizeof(u16))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);
static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
        struct tcf_meta_ops *o;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
                        read_unlock(&ife_mod_lock);
                        return o;
                }
        }
        read_unlock(&ife_mod_lock);

        return NULL;
}
int register_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;

        if (!mops->metaid || !mops->metatype || !mops->name ||
            !mops->check_presence || !mops->encode || !mops->decode ||
            !mops->get || !mops->alloc)
                return -EINVAL;

        write_lock(&ife_mod_lock);

        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
                        write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }

        if (!mops->release)
                mops->release = ife_release_meta_gen;

        list_add_tail(&mops->list, &ifeoplist);
        write_unlock(&ife_mod_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;
        int err = -ENOENT;

        write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
                        err = 0;
                        break;
                }
        }
        write_unlock(&ife_mod_lock);

        return err;
}
EXPORT_SYMBOL_GPL(register_ife_op);
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
        int ret = 0;

        /* XXX: unfortunately we can't use nla_policy at this point
         * because a length of 0 is valid in the case of "allow".
         * "use" semantics do enforce a proper length and nla_policy
         * could have been used, but it is awkward to use it just for that..
         */
        if (ops->validate)
                return ops->validate(val, len);

        if (ops->metatype == NLA_U32)
                ret = ife_validate_meta_u32(val, len);
        else if (ops->metatype == NLA_U16)
                ret = ife_validate_meta_u16(val, len);

        return ret;
}
#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
        switch (metaid) {
        case IFE_META_SKBMARK:
                return "skbmark";
        case IFE_META_PRIO:
                return "skbprio";
        case IFE_META_TCINDEX:
                return "tcindex";
        default:
                return "unknown";
        }
}
#endif
/* called when adding new meta information
 */
static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops) {
                ret = -ENOENT;
#ifdef CONFIG_MODULES
                if (rtnl_held)
                        rtnl_unlock();
                request_module("ife-meta-%s", ife_meta_id2name(metaid));
                if (rtnl_held)
                        rtnl_lock();
                ops = find_ife_oplist(metaid);
#endif
        }

        if (ops) {
                ret = 0;
                if (len)
                        ret = ife_validate_metatype(ops, val, len);

                module_put(ops->owner);
        }

        return ret;
}
/* called when adding new meta information
 */
static int __add_metainfo(const struct tcf_meta_ops *ops,
                          struct tcf_ife_info *ife, u32 metaid, void *metaval,
                          int len, bool atomic, bool exists)
{
        struct tcf_meta_info *mi = NULL;
        int ret = 0;

        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi)
                return -ENOMEM;

        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        return ret;
                }
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        list_add_tail(&mi->metalist, &ife->metalist);
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);

        return ret;
}
static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
                                    struct tcf_ife_info *ife, u32 metaid,
                                    bool exists)
{
        int ret;

        if (!try_module_get(ops->owner))
                return -ENOENT;
        ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
        if (ret)
                module_put(ops->owner);
        return ret;
}
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool exists)
{
        const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret;

        if (!ops)
                return -ENOENT;
        ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
        if (ret)
                /*put back what find_ife_oplist took */
                module_put(ops->owner);

        return ret;
}
static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
        read_unlock(&ife_mod_lock);

        if (installed)
                return 0;
        else
                return -EINVAL;
}
static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e;
        struct nlattr *nest;
        unsigned char *b = skb_tail_pointer(skb);
        int total_encoded = 0;

        /*can only happen on decode */
        if (list_empty(&ife->metalist))
                return 0;

        nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
        if (!nest)
                goto out_nlmsg_trim;

        list_for_each_entry(e, &ife->metalist, metalist) {
                if (!e->ops->get(skb, e))
                        total_encoded += 1;
        }

        if (!total_encoded)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        return 0;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}
/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_meta_info *e, *n;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
                                e->ops->release(e);
                        else
                                kfree(e->metaval);
                }
                module_put(e->ops->owner);
                kfree(e);
        }
}
static void tcf_ife_cleanup(struct tc_action *a)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;

        spin_lock_bh(&ife->tcf_lock);
        _tcf_ife_cleanup(a);
        spin_unlock_bh(&ife->tcf_lock);

        p = rcu_dereference_protected(ife->params, 1);
        if (p)
                kfree_rcu(p, rcu);
}
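
/* ife->params is published via RCU: the datapath reads it under
 * rcu_read_lock_bh() in tcf_ife_act(), while the control path swaps it
 * with rcu_replace_pointer() and frees the old copy with kfree_rcu(),
 * as done here and at the end of tcf_ife_init().
 */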
static int load_metalist(struct nlattr **tb, bool rtnl_held)
{
        int i;

        for (i = 1; i < max_metacnt; i++) {
                if (tb[i]) {
                        void *val = nla_data(tb[i]);
                        int len = nla_len(tb[i]);
                        int rc;

                        rc = load_metaops_and_vet(i, val, len, rtnl_held);
                        if (rc != 0)
                                return rc;
                }
        }

        return 0;
}
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists, bool rtnl_held)
{
        int len = 0;
        int rc = 0;
        int i = 0;
        void *val;

        for (i = 1; i < max_metacnt; i++) {
                if (tb[i]) {
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);

                        rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
        }

        return rc;
}
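
/* load_metalist() only vets the requested metadata (possibly autoloading
 * modules) before any action state exists; populate_metalist() later
 * attaches the metadata to the action once parsing can no longer fail.
 * Both walk the nested TCA_IFE_METALST attributes indexed by metaid.
 */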
static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
                        struct tcf_proto *tp, u32 flags,
                        struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tcf_ife_params *p;
        struct tcf_ife_info *ife;
        u16 ife_type = ETH_P_IFE;
        struct tc_ife *parm;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
        bool exists = false;
        int ret = 0;
        u32 index;
        int err;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (!tb[TCA_IFE_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_IFE_PARMS]);

        /* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
         * they cannot run at the same time. Check on all other values which
         * are not supported right now.
         */
        if (parm->flags & ~IFE_ENCODE)
                return -EINVAL;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
                                                  tb[TCA_IFE_METALST], NULL,
                                                  NULL);
                if (err) {
                        kfree(p);
                        return err;
                }
                err = load_metalist(tb2, rtnl_held);
                if (err) {
                        kfree(p);
                        return err;
                }
        }

        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0) {
                kfree(p);
                return err;
        }
        exists = err;
        if (exists && bind) {
                kfree(p);
                return 0;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
                                     bind, true, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        kfree(p);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!ovr) {
                tcf_idr_release(*a, bind);
                kfree(p);
                return -EEXIST;
        }

        ife = to_ife(*a);
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        p->flags = parm->flags;

        if (parm->flags & IFE_ENCODE) {
                if (tb[TCA_IFE_TYPE])
                        ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
                if (tb[TCA_IFE_DMAC])
                        daddr = nla_data(tb[TCA_IFE_DMAC]);
                if (tb[TCA_IFE_SMAC])
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }

        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
                else
                        eth_zero_addr(p->eth_dst);

                if (saddr)
                        ether_addr_copy(p->eth_src, saddr);
                else
                        eth_zero_addr(p->eth_src);

                p->eth_type = ife_type;
        }

        if (tb[TCA_IFE_METALST]) {
                err = populate_metalist(ife, tb2, exists, rtnl_held);
                if (err)
                        goto metadata_parse_err;
        } else {
                /* if no passed metadata allow list or passed allow-all
                 * then here we process by adding as many supported metadatum
                 * as we can. You better have at least one else we are
                 * going to bail out
                 */
                err = use_all_metadata(ife, exists);
                if (err)
                        goto metadata_parse_err;
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        /* protected by tcf_lock when modifying existing action */
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p = rcu_replace_pointer(ife->params, p, 1);

        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);

        return ret;

metadata_parse_err:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        kfree(p);
        tcf_idr_release(*a, bind);
        return err;
}
static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                        int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;
        struct tc_ife opt = {
                .index = ife->tcf_index,
                .refcnt = refcount_read(&ife->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&ife->tcf_lock);
        opt.action = ife->tcf_action;
        p = rcu_dereference_protected(ife->params,
                                      lockdep_is_held(&ife->tcf_lock));
        opt.flags = p->flags;

        if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &ife->tcf_tm);
        if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
                goto nla_put_failure;

        if (!is_zero_ether_addr(p->eth_dst)) {
                if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
                        goto nla_put_failure;
        }

        if (!is_zero_ether_addr(p->eth_src)) {
                if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
                        goto nla_put_failure;
        }

        if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
                goto nla_put_failure;

        if (dump_metalist(skb, ife)) {
                /*ignore failure to dump metalist */
                pr_info("Failed to dump metalist\n");
        }

        spin_unlock_bh(&ife->tcf_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&ife->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}
static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
                              u16 metaid, u16 mlen, void *mdata)
{
        struct tcf_meta_info *e;

        /* XXX: use hash to speed up */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (metaid == e->metaid) {
                        if (e->ops) {
                                /* We check for decode presence already */
                                return e->ops->decode(skb, mdata, mlen);
                        }
                }
        }

        return -ENOENT;
}
static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        u8 *ifehdr_end;
        u8 *tlv_data;
        u16 metalen;

        bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        tlv_data = ife_decode(skb, &metalen);
        if (unlikely(!tlv_data)) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        ifehdr_end = tlv_data + metalen;
        for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
                u8 *curr_data;
                u16 mtype;
                u16 dlen;

                curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
                                                &dlen, NULL);
                if (!curr_data) {
                        qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                        return TC_ACT_SHOT;
                }

                if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
                        /* abuse overlimits to count when we receive metadata
                         * but don't have an ops for it
                         */
                        pr_info_ratelimited("Unknown metaid %d dlen %d\n",
                                            mtype, dlen);
                        qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
                }
        }

        if (WARN_ON(tlv_data != ifehdr_end)) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);
        skb_reset_network_header(skb);

        return action;
}
/*XXX: check if we can do this at install time instead of current
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e, *n;
        int tot_run_sz = 0, run_sz = 0;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                if (e->ops->check_presence) {
                        run_sz = e->ops->check_presence(skb, e);
                        tot_run_sz += run_sz;
                }
        }

        return tot_run_sz;
}
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res, struct tcf_ife_params *p)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ethhdr *oethh;   /* outer ether header */
        struct tcf_meta_info *e;
        /*
         * OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
         * where ORIGDATA = original ethernet header ...
         */
        u16 metalen = ife_get_sz(skb, ife);
        int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
        unsigned int skboff = 0;
        int new_len = skb->len + hdrm;
        bool exceed_mtu = false;
        void *ife_meta;
        int err = 0;

        if (!skb_at_tc_ingress(skb)) {
                if (new_len > skb->dev->mtu)
                        exceed_mtu = true;
        }

        bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (!metalen) {         /* no metadata to send */
                /* abuse overlimits to count when we allow packet
                 * without metadata
                 */
                qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return action;
        }
        /* could be stupid policy setup or mtu config
         * so let's be conservative.. */
        if ((action == TC_ACT_SHOT) || exceed_mtu) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        ife_meta = ife_encode(skb, metalen);

        spin_lock(&ife->tcf_lock);

        /* XXX: we don't have a clever way of telling encode to
         * not repeat some of the computations that are done by
         * ops->presence_check...
         */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (e->ops->encode) {
                        err = e->ops->encode(skb, (void *)(ife_meta + skboff),
                                             e);
                }
                if (err < 0) {
                        /* too corrupt to keep around if overwritten */
                        spin_unlock(&ife->tcf_lock);
                        qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                        return TC_ACT_SHOT;
                }
                skboff += err;
        }
        spin_unlock(&ife->tcf_lock);
        oethh = (struct ethhdr *)skb->data;

        if (!is_zero_ether_addr(p->eth_src))
                ether_addr_copy(oethh->h_source, p->eth_src);
        if (!is_zero_ether_addr(p->eth_dst))
                ether_addr_copy(oethh->h_dest, p->eth_dst);
        oethh->h_proto = htons(p->eth_type);

        if (skb_at_tc_ingress(skb))
                skb_pull(skb, skb->dev->hard_header_len);

        return action;
}
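
/* Sizing example: encoding just skb->mark costs 8 bytes of metadata
 * (2 byte type + 2 byte length + 4 byte value, per ife_check_meta_u32()),
 * so the frame grows by 8 + IFE_METAHDRLEN + hard_header_len bytes,
 * which is what the exceed_mtu test at the top guards against on egress.
 */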
static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;
        int ret;

        p = rcu_dereference_bh(ife->params);
        if (p->flags & IFE_ENCODE) {
                ret = tcf_ife_encode(skb, a, res, p);
                return ret;
        }

        return tcf_ife_decode(skb, a, res);
}
static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_idr_search(tn, a, index);
}
static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .id = TCA_ID_IFE,
        .owner = THIS_MODULE,
        .act = tcf_ife_act,
        .dump = tcf_ife_dump,
        .cleanup = tcf_ife_cleanup,
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
        .size = sizeof(struct tcf_ife_info),
};
static __net_init int ife_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tc_action_net_init(net, tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, ife_net_id);
}
static struct pernet_operations ife_net_ops = {
        .init = ife_init_net,
        .exit_batch = ife_exit_net,
        .id   = &ife_net_id,
        .size = sizeof(struct tc_action_net),
};
static int __init ife_init_module(void)
{
        return tcf_register_action(&act_ife_ops, &ife_net_ops);
}
static void __exit ife_cleanup_module(void)
{
        tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}
module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");