/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *	Refer to: draft-ietf-forces-interfelfb-03 and netdev01 paper:
 *	"Distributing Linux Traffic Control Classifier-Action Subsystems"
 *	Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 */
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <linux/skbuff.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <net/net_namespace.h>
30 #include <net/netlink.h>
31 #include <net/pkt_sched.h>
32 #include <uapi/linux/tc_act/tc_ife.h>
33 #include <net/tc_act/tc_ife.h>
34 #include <linux/etherdevice.h>
37 static unsigned int ife_net_id
;
38 static int max_metacnt
= IFE_META_MAX
+ 1;
39 static struct tc_action_ops act_ife_ops
;
41 static const struct nla_policy ife_policy
[TCA_IFE_MAX
+ 1] = {
42 [TCA_IFE_PARMS
] = { .len
= sizeof(struct tc_ife
)},
43 [TCA_IFE_DMAC
] = { .len
= ETH_ALEN
},
44 [TCA_IFE_SMAC
] = { .len
= ETH_ALEN
},
45 [TCA_IFE_TYPE
] = { .type
= NLA_U16
},
48 int ife_encode_meta_u16(u16 metaval
, void *skbdata
, struct tcf_meta_info
*mi
)
53 edata
= *(u16
*)mi
->metaval
;
57 if (!edata
) /* will not encode */
61 return ife_tlv_meta_encode(skbdata
, mi
->metaid
, 2, &edata
);
63 EXPORT_SYMBOL_GPL(ife_encode_meta_u16
);
65 int ife_get_meta_u32(struct sk_buff
*skb
, struct tcf_meta_info
*mi
)
68 return nla_put_u32(skb
, mi
->metaid
, *(u32
*)mi
->metaval
);
70 return nla_put(skb
, mi
->metaid
, 0, NULL
);
72 EXPORT_SYMBOL_GPL(ife_get_meta_u32
);
74 int ife_check_meta_u32(u32 metaval
, struct tcf_meta_info
*mi
)
76 if (metaval
|| mi
->metaval
)
77 return 8; /* T+L+V == 2+2+4 */
81 EXPORT_SYMBOL_GPL(ife_check_meta_u32
);
83 int ife_check_meta_u16(u16 metaval
, struct tcf_meta_info
*mi
)
85 if (metaval
|| mi
->metaval
)
86 return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
90 EXPORT_SYMBOL_GPL(ife_check_meta_u16
);
92 int ife_encode_meta_u32(u32 metaval
, void *skbdata
, struct tcf_meta_info
*mi
)
97 edata
= *(u32
*)mi
->metaval
;
101 if (!edata
) /* will not encode */
104 edata
= htonl(edata
);
105 return ife_tlv_meta_encode(skbdata
, mi
->metaid
, 4, &edata
);
107 EXPORT_SYMBOL_GPL(ife_encode_meta_u32
);
109 int ife_get_meta_u16(struct sk_buff
*skb
, struct tcf_meta_info
*mi
)
112 return nla_put_u16(skb
, mi
->metaid
, *(u16
*)mi
->metaval
);
114 return nla_put(skb
, mi
->metaid
, 0, NULL
);
116 EXPORT_SYMBOL_GPL(ife_get_meta_u16
);
118 int ife_alloc_meta_u32(struct tcf_meta_info
*mi
, void *metaval
, gfp_t gfp
)
120 mi
->metaval
= kmemdup(metaval
, sizeof(u32
), gfp
);
126 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32
);
128 int ife_alloc_meta_u16(struct tcf_meta_info
*mi
, void *metaval
, gfp_t gfp
)
130 mi
->metaval
= kmemdup(metaval
, sizeof(u16
), gfp
);
136 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16
);
138 void ife_release_meta_gen(struct tcf_meta_info
*mi
)
142 EXPORT_SYMBOL_GPL(ife_release_meta_gen
);
144 int ife_validate_meta_u32(void *val
, int len
)
146 if (len
== sizeof(u32
))
151 EXPORT_SYMBOL_GPL(ife_validate_meta_u32
);
153 int ife_validate_meta_u16(void *val
, int len
)
155 /* length will not include padding */
156 if (len
== sizeof(u16
))
161 EXPORT_SYMBOL_GPL(ife_validate_meta_u16
);
/* registry of metadata ops modules, guarded by ife_mod_lock */
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);
166 static struct tcf_meta_ops
*find_ife_oplist(u16 metaid
)
168 struct tcf_meta_ops
*o
;
170 read_lock(&ife_mod_lock
);
171 list_for_each_entry(o
, &ifeoplist
, list
) {
172 if (o
->metaid
== metaid
) {
173 if (!try_module_get(o
->owner
))
175 read_unlock(&ife_mod_lock
);
179 read_unlock(&ife_mod_lock
);
184 int register_ife_op(struct tcf_meta_ops
*mops
)
186 struct tcf_meta_ops
*m
;
188 if (!mops
->metaid
|| !mops
->metatype
|| !mops
->name
||
189 !mops
->check_presence
|| !mops
->encode
|| !mops
->decode
||
190 !mops
->get
|| !mops
->alloc
)
193 write_lock(&ife_mod_lock
);
195 list_for_each_entry(m
, &ifeoplist
, list
) {
196 if (m
->metaid
== mops
->metaid
||
197 (strcmp(mops
->name
, m
->name
) == 0)) {
198 write_unlock(&ife_mod_lock
);
204 mops
->release
= ife_release_meta_gen
;
206 list_add_tail(&mops
->list
, &ifeoplist
);
207 write_unlock(&ife_mod_lock
);
210 EXPORT_SYMBOL_GPL(unregister_ife_op
);
212 int unregister_ife_op(struct tcf_meta_ops
*mops
)
214 struct tcf_meta_ops
*m
;
217 write_lock(&ife_mod_lock
);
218 list_for_each_entry(m
, &ifeoplist
, list
) {
219 if (m
->metaid
== mops
->metaid
) {
220 list_del(&mops
->list
);
225 write_unlock(&ife_mod_lock
);
229 EXPORT_SYMBOL_GPL(register_ife_op
);
231 static int ife_validate_metatype(struct tcf_meta_ops
*ops
, void *val
, int len
)
234 /* XXX: unfortunately cant use nla_policy at this point
235 * because a length of 0 is valid in the case of
236 * "allow". "use" semantics do enforce for proper
237 * length and i couldve use nla_policy but it makes it hard
238 * to use it just for that..
241 return ops
->validate(val
, len
);
243 if (ops
->metatype
== NLA_U32
)
244 ret
= ife_validate_meta_u32(val
, len
);
245 else if (ops
->metatype
== NLA_U16
)
246 ret
= ife_validate_meta_u16(val
, len
);
251 /* called when adding new meta information
253 static int load_metaops_and_vet(u32 metaid
, void *val
, int len
)
255 struct tcf_meta_ops
*ops
= find_ife_oplist(metaid
);
260 #ifdef CONFIG_MODULES
262 request_module("ifemeta%u", metaid
);
264 ops
= find_ife_oplist(metaid
);
271 ret
= ife_validate_metatype(ops
, val
, len
);
273 module_put(ops
->owner
);
279 /* called when adding new meta information
281 static int __add_metainfo(const struct tcf_meta_ops
*ops
,
282 struct tcf_ife_info
*ife
, u32 metaid
, void *metaval
,
283 int len
, bool atomic
, bool exists
)
285 struct tcf_meta_info
*mi
= NULL
;
288 mi
= kzalloc(sizeof(*mi
), atomic
? GFP_ATOMIC
: GFP_KERNEL
);
295 ret
= ops
->alloc(mi
, metaval
, atomic
? GFP_ATOMIC
: GFP_KERNEL
);
303 spin_lock_bh(&ife
->tcf_lock
);
304 list_add_tail(&mi
->metalist
, &ife
->metalist
);
306 spin_unlock_bh(&ife
->tcf_lock
);
311 static int add_metainfo_and_get_ops(const struct tcf_meta_ops
*ops
,
312 struct tcf_ife_info
*ife
, u32 metaid
,
317 if (!try_module_get(ops
->owner
))
319 ret
= __add_metainfo(ops
, ife
, metaid
, NULL
, 0, true, exists
);
321 module_put(ops
->owner
);
325 static int add_metainfo(struct tcf_ife_info
*ife
, u32 metaid
, void *metaval
,
326 int len
, bool exists
)
328 const struct tcf_meta_ops
*ops
= find_ife_oplist(metaid
);
333 ret
= __add_metainfo(ops
, ife
, metaid
, metaval
, len
, false, exists
);
335 /*put back what find_ife_oplist took */
336 module_put(ops
->owner
);
340 static int use_all_metadata(struct tcf_ife_info
*ife
, bool exists
)
342 struct tcf_meta_ops
*o
;
346 read_lock(&ife_mod_lock
);
347 list_for_each_entry(o
, &ifeoplist
, list
) {
348 rc
= add_metainfo_and_get_ops(o
, ife
, o
->metaid
, exists
);
352 read_unlock(&ife_mod_lock
);
360 static int dump_metalist(struct sk_buff
*skb
, struct tcf_ife_info
*ife
)
362 struct tcf_meta_info
*e
;
364 unsigned char *b
= skb_tail_pointer(skb
);
365 int total_encoded
= 0;
367 /*can only happen on decode */
368 if (list_empty(&ife
->metalist
))
371 nest
= nla_nest_start(skb
, TCA_IFE_METALST
);
375 list_for_each_entry(e
, &ife
->metalist
, metalist
) {
376 if (!e
->ops
->get(skb
, e
))
383 nla_nest_end(skb
, nest
);
392 /* under ife->tcf_lock */
393 static void _tcf_ife_cleanup(struct tc_action
*a
, int bind
)
395 struct tcf_ife_info
*ife
= to_ife(a
);
396 struct tcf_meta_info
*e
, *n
;
398 list_for_each_entry_safe(e
, n
, &ife
->metalist
, metalist
) {
399 list_del(&e
->metalist
);
406 module_put(e
->ops
->owner
);
411 static void tcf_ife_cleanup(struct tc_action
*a
, int bind
)
413 struct tcf_ife_info
*ife
= to_ife(a
);
415 spin_lock_bh(&ife
->tcf_lock
);
416 _tcf_ife_cleanup(a
, bind
);
417 spin_unlock_bh(&ife
->tcf_lock
);
420 static int populate_metalist(struct tcf_ife_info
*ife
, struct nlattr
**tb
,
428 for (i
= 1; i
< max_metacnt
; i
++) {
430 val
= nla_data(tb
[i
]);
431 len
= nla_len(tb
[i
]);
433 rc
= load_metaops_and_vet(i
, val
, len
);
437 rc
= add_metainfo(ife
, i
, val
, len
, exists
);
446 static int tcf_ife_init(struct net
*net
, struct nlattr
*nla
,
447 struct nlattr
*est
, struct tc_action
**a
,
450 struct tc_action_net
*tn
= net_generic(net
, ife_net_id
);
451 struct nlattr
*tb
[TCA_IFE_MAX
+ 1];
452 struct nlattr
*tb2
[IFE_META_MAX
+ 1];
453 struct tcf_ife_info
*ife
;
454 u16 ife_type
= ETH_P_IFE
;
462 err
= nla_parse_nested(tb
, TCA_IFE_MAX
, nla
, ife_policy
, NULL
);
466 if (!tb
[TCA_IFE_PARMS
])
469 parm
= nla_data(tb
[TCA_IFE_PARMS
]);
471 exists
= tcf_idr_check(tn
, parm
->index
, a
, bind
);
476 ret
= tcf_idr_create(tn
, parm
->index
, est
, a
, &act_ife_ops
,
482 tcf_idr_release(*a
, bind
);
488 ife
->flags
= parm
->flags
;
490 if (parm
->flags
& IFE_ENCODE
) {
491 if (tb
[TCA_IFE_TYPE
])
492 ife_type
= nla_get_u16(tb
[TCA_IFE_TYPE
]);
493 if (tb
[TCA_IFE_DMAC
])
494 daddr
= nla_data(tb
[TCA_IFE_DMAC
]);
495 if (tb
[TCA_IFE_SMAC
])
496 saddr
= nla_data(tb
[TCA_IFE_SMAC
]);
500 spin_lock_bh(&ife
->tcf_lock
);
501 ife
->tcf_action
= parm
->action
;
503 spin_unlock_bh(&ife
->tcf_lock
);
505 if (parm
->flags
& IFE_ENCODE
) {
507 ether_addr_copy(ife
->eth_dst
, daddr
);
509 eth_zero_addr(ife
->eth_dst
);
512 ether_addr_copy(ife
->eth_src
, saddr
);
514 eth_zero_addr(ife
->eth_src
);
516 ife
->eth_type
= ife_type
;
519 if (ret
== ACT_P_CREATED
)
520 INIT_LIST_HEAD(&ife
->metalist
);
522 if (tb
[TCA_IFE_METALST
]) {
523 err
= nla_parse_nested(tb2
, IFE_META_MAX
, tb
[TCA_IFE_METALST
],
528 tcf_idr_release(*a
, bind
);
529 if (ret
== ACT_P_CREATED
)
530 _tcf_ife_cleanup(*a
, bind
);
534 err
= populate_metalist(ife
, tb2
, exists
);
536 goto metadata_parse_err
;
539 /* if no passed metadata allow list or passed allow-all
540 * then here we process by adding as many supported metadatum
541 * as we can. You better have at least one else we are
544 err
= use_all_metadata(ife
, exists
);
546 if (ret
== ACT_P_CREATED
)
547 _tcf_ife_cleanup(*a
, bind
);
552 if (ret
== ACT_P_CREATED
)
553 tcf_idr_insert(tn
, *a
);
558 static int tcf_ife_dump(struct sk_buff
*skb
, struct tc_action
*a
, int bind
,
561 unsigned char *b
= skb_tail_pointer(skb
);
562 struct tcf_ife_info
*ife
= to_ife(a
);
563 struct tc_ife opt
= {
564 .index
= ife
->tcf_index
,
565 .refcnt
= ife
->tcf_refcnt
- ref
,
566 .bindcnt
= ife
->tcf_bindcnt
- bind
,
567 .action
= ife
->tcf_action
,
572 if (nla_put(skb
, TCA_IFE_PARMS
, sizeof(opt
), &opt
))
573 goto nla_put_failure
;
575 tcf_tm_dump(&t
, &ife
->tcf_tm
);
576 if (nla_put_64bit(skb
, TCA_IFE_TM
, sizeof(t
), &t
, TCA_IFE_PAD
))
577 goto nla_put_failure
;
579 if (!is_zero_ether_addr(ife
->eth_dst
)) {
580 if (nla_put(skb
, TCA_IFE_DMAC
, ETH_ALEN
, ife
->eth_dst
))
581 goto nla_put_failure
;
584 if (!is_zero_ether_addr(ife
->eth_src
)) {
585 if (nla_put(skb
, TCA_IFE_SMAC
, ETH_ALEN
, ife
->eth_src
))
586 goto nla_put_failure
;
589 if (nla_put(skb
, TCA_IFE_TYPE
, 2, &ife
->eth_type
))
590 goto nla_put_failure
;
592 if (dump_metalist(skb
, ife
)) {
593 /*ignore failure to dump metalist */
594 pr_info("Failed to dump metalist\n");
604 static int find_decode_metaid(struct sk_buff
*skb
, struct tcf_ife_info
*ife
,
605 u16 metaid
, u16 mlen
, void *mdata
)
607 struct tcf_meta_info
*e
;
609 /* XXX: use hash to speed up */
610 list_for_each_entry(e
, &ife
->metalist
, metalist
) {
611 if (metaid
== e
->metaid
) {
613 /* We check for decode presence already */
614 return e
->ops
->decode(skb
, mdata
, mlen
);
622 static int tcf_ife_decode(struct sk_buff
*skb
, const struct tc_action
*a
,
623 struct tcf_result
*res
)
625 struct tcf_ife_info
*ife
= to_ife(a
);
626 int action
= ife
->tcf_action
;
631 spin_lock(&ife
->tcf_lock
);
632 bstats_update(&ife
->tcf_bstats
, skb
);
633 tcf_lastuse_update(&ife
->tcf_tm
);
634 spin_unlock(&ife
->tcf_lock
);
636 if (skb_at_tc_ingress(skb
))
637 skb_push(skb
, skb
->dev
->hard_header_len
);
639 tlv_data
= ife_decode(skb
, &metalen
);
640 if (unlikely(!tlv_data
)) {
641 spin_lock(&ife
->tcf_lock
);
642 ife
->tcf_qstats
.drops
++;
643 spin_unlock(&ife
->tcf_lock
);
647 ifehdr_end
= tlv_data
+ metalen
;
648 for (; tlv_data
< ifehdr_end
; tlv_data
= ife_tlv_meta_next(tlv_data
)) {
653 curr_data
= ife_tlv_meta_decode(tlv_data
, ifehdr_end
, &mtype
,
656 qstats_drop_inc(this_cpu_ptr(ife
->common
.cpu_qstats
));
660 if (find_decode_metaid(skb
, ife
, mtype
, dlen
, curr_data
)) {
661 /* abuse overlimits to count when we receive metadata
662 * but dont have an ops for it
664 pr_info_ratelimited("Unknown metaid %d dlen %d\n",
666 ife
->tcf_qstats
.overlimits
++;
670 if (WARN_ON(tlv_data
!= ifehdr_end
)) {
671 spin_lock(&ife
->tcf_lock
);
672 ife
->tcf_qstats
.drops
++;
673 spin_unlock(&ife
->tcf_lock
);
677 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
678 skb_reset_network_header(skb
);
683 /*XXX: check if we can do this at install time instead of current
686 static int ife_get_sz(struct sk_buff
*skb
, struct tcf_ife_info
*ife
)
688 struct tcf_meta_info
*e
, *n
;
689 int tot_run_sz
= 0, run_sz
= 0;
691 list_for_each_entry_safe(e
, n
, &ife
->metalist
, metalist
) {
692 if (e
->ops
->check_presence
) {
693 run_sz
= e
->ops
->check_presence(skb
, e
);
694 tot_run_sz
+= run_sz
;
701 static int tcf_ife_encode(struct sk_buff
*skb
, const struct tc_action
*a
,
702 struct tcf_result
*res
)
704 struct tcf_ife_info
*ife
= to_ife(a
);
705 int action
= ife
->tcf_action
;
706 struct ethhdr
*oethh
; /* outer ether header */
707 struct tcf_meta_info
*e
;
709 OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
710 where ORIGDATA = original ethernet header ...
712 u16 metalen
= ife_get_sz(skb
, ife
);
713 int hdrm
= metalen
+ skb
->dev
->hard_header_len
+ IFE_METAHDRLEN
;
714 unsigned int skboff
= 0;
715 int new_len
= skb
->len
+ hdrm
;
716 bool exceed_mtu
= false;
720 if (!skb_at_tc_ingress(skb
)) {
721 if (new_len
> skb
->dev
->mtu
)
725 spin_lock(&ife
->tcf_lock
);
726 bstats_update(&ife
->tcf_bstats
, skb
);
727 tcf_lastuse_update(&ife
->tcf_tm
);
729 if (!metalen
) { /* no metadata to send */
730 /* abuse overlimits to count when we allow packet
733 ife
->tcf_qstats
.overlimits
++;
734 spin_unlock(&ife
->tcf_lock
);
737 /* could be stupid policy setup or mtu config
738 * so lets be conservative.. */
739 if ((action
== TC_ACT_SHOT
) || exceed_mtu
) {
740 ife
->tcf_qstats
.drops
++;
741 spin_unlock(&ife
->tcf_lock
);
745 if (skb_at_tc_ingress(skb
))
746 skb_push(skb
, skb
->dev
->hard_header_len
);
748 ife_meta
= ife_encode(skb
, metalen
);
750 /* XXX: we dont have a clever way of telling encode to
751 * not repeat some of the computations that are done by
752 * ops->presence_check...
754 list_for_each_entry(e
, &ife
->metalist
, metalist
) {
755 if (e
->ops
->encode
) {
756 err
= e
->ops
->encode(skb
, (void *)(ife_meta
+ skboff
),
760 /* too corrupt to keep around if overwritten */
761 ife
->tcf_qstats
.drops
++;
762 spin_unlock(&ife
->tcf_lock
);
767 oethh
= (struct ethhdr
*)skb
->data
;
769 if (!is_zero_ether_addr(ife
->eth_src
))
770 ether_addr_copy(oethh
->h_source
, ife
->eth_src
);
771 if (!is_zero_ether_addr(ife
->eth_dst
))
772 ether_addr_copy(oethh
->h_dest
, ife
->eth_dst
);
773 oethh
->h_proto
= htons(ife
->eth_type
);
775 if (skb_at_tc_ingress(skb
))
776 skb_pull(skb
, skb
->dev
->hard_header_len
);
778 spin_unlock(&ife
->tcf_lock
);
783 static int tcf_ife_act(struct sk_buff
*skb
, const struct tc_action
*a
,
784 struct tcf_result
*res
)
786 struct tcf_ife_info
*ife
= to_ife(a
);
788 if (ife
->flags
& IFE_ENCODE
)
789 return tcf_ife_encode(skb
, a
, res
);
791 if (!(ife
->flags
& IFE_ENCODE
))
792 return tcf_ife_decode(skb
, a
, res
);
794 pr_info_ratelimited("unknown failure(policy neither de/encode\n");
795 spin_lock(&ife
->tcf_lock
);
796 bstats_update(&ife
->tcf_bstats
, skb
);
797 tcf_lastuse_update(&ife
->tcf_tm
);
798 ife
->tcf_qstats
.drops
++;
799 spin_unlock(&ife
->tcf_lock
);
804 static int tcf_ife_walker(struct net
*net
, struct sk_buff
*skb
,
805 struct netlink_callback
*cb
, int type
,
806 const struct tc_action_ops
*ops
)
808 struct tc_action_net
*tn
= net_generic(net
, ife_net_id
);
810 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
);
813 static int tcf_ife_search(struct net
*net
, struct tc_action
**a
, u32 index
)
815 struct tc_action_net
*tn
= net_generic(net
, ife_net_id
);
817 return tcf_idr_search(tn
, a
, index
);
820 static struct tc_action_ops act_ife_ops
= {
823 .owner
= THIS_MODULE
,
825 .dump
= tcf_ife_dump
,
826 .cleanup
= tcf_ife_cleanup
,
827 .init
= tcf_ife_init
,
828 .walk
= tcf_ife_walker
,
829 .lookup
= tcf_ife_search
,
830 .size
= sizeof(struct tcf_ife_info
),
833 static __net_init
int ife_init_net(struct net
*net
)
835 struct tc_action_net
*tn
= net_generic(net
, ife_net_id
);
837 return tc_action_net_init(tn
, &act_ife_ops
);
840 static void __net_exit
ife_exit_net(struct net
*net
)
842 struct tc_action_net
*tn
= net_generic(net
, ife_net_id
);
844 tc_action_net_exit(tn
);
847 static struct pernet_operations ife_net_ops
= {
848 .init
= ife_init_net
,
849 .exit
= ife_exit_net
,
851 .size
= sizeof(struct tc_action_net
),
854 static int __init
ife_init_module(void)
856 return tcf_register_action(&act_ife_ops
, &ife_net_ops
);
859 static void __exit
ife_cleanup_module(void)
861 tcf_unregister_action(&act_ife_ops
, &ife_net_ops
);
864 module_init(ife_init_module
);
865 module_exit(ife_cleanup_module
);
867 MODULE_AUTHOR("Jamal Hadi Salim(2015)");
868 MODULE_DESCRIPTION("Inter-FE LFB action");
869 MODULE_LICENSE("GPL");