/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *		Refer to:
 *		draft-ietf-forces-interfelfb-03
 *		and
 *		netdev01 paper:
 *		"Distributing Linux Traffic Control Classifier-Action
 *		Subsystem"
 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>	/* ife_encode()/ife_decode() and the TLV helpers used below */

#define IFE_TAB_MASK 15

static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
        [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
        [TCA_IFE_DMAC] = { .len = ETH_ALEN},
        [TCA_IFE_SMAC] = { .len = ETH_ALEN},
        [TCA_IFE_TYPE] = { .type = NLA_U16},
};

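/* The exported ife_*_meta_u16/u32 helpers below implement the common
 * encode/check/get/alloc/validate callbacks for simple 16- and 32-bit
 * metadata, so metadata modules (e.g. the in-tree skb mark and prio
 * metadata actions) do not have to open-code their own tcf_meta_ops.
 */
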
int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u16 edata = 0;

        if (mi->metaval)
                edata = *(u16 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htons(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+V == 2+2+4 */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);

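/* TLV size arithmetic used by the check helpers above: each metadatum is
 * carried as a 2-byte type plus a 2-byte length, with the value padded to a
 * 4-byte multiple.  A u32 value therefore costs 2 + 2 + 4 = 8 bytes and a
 * u16 value costs 2 + 2 + (2 + 2 bytes of pad) = 8 bytes on the wire.
 */
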
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u32 edata = 0;

        if (mi->metaval)
                edata = *(u32 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htonl(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
        kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
        if (len == sizeof(u32))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
        /* length will not include padding */
        if (len == sizeof(u16))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

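/* Look up the tcf_meta_ops registered for @metaid.  On success a reference
 * on the owning module has been taken via try_module_get(), so the caller
 * must balance it with module_put() once it is done with the ops.
 */
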
static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
        struct tcf_meta_ops *o;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
                        read_unlock(&ife_mod_lock);
                        return o;
                }
        }
        read_unlock(&ife_mod_lock);

        return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;

        if (!mops->metaid || !mops->metatype || !mops->name ||
            !mops->check_presence || !mops->encode || !mops->decode ||
            !mops->get || !mops->alloc)
                return -EINVAL;

        write_lock(&ife_mod_lock);

        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
                        write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }

        if (!mops->release)
                mops->release = ife_release_meta_gen;

        list_add_tail(&mops->list, &ifeoplist);
        write_unlock(&ife_mod_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;
        int err = -ENOENT;

        write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
                        err = 0;
                        break;
                }
        }
        write_unlock(&ife_mod_lock);

        return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);

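/* A metadata module (see the in-tree users of these helpers for real
 * examples) fills in a struct tcf_meta_ops - metaid, metatype, name,
 * check_presence, encode, decode, get, alloc and optionally
 * release/validate - then calls register_ife_op() from its module init and
 * unregister_ife_op() from its module exit.  The exported ife_*_meta_u16/u32
 * helpers above cover the common fixed-size cases.
 */
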
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
        int ret = 0;

        /* XXX: unfortunately we can't use nla_policy at this point
         * because a length of 0 is valid in the case of
         * "allow". "use" semantics do enforce a proper
         * length, and I could have used nla_policy, but it makes it hard
         * to use it just for that..
         */
        if (ops->validate)
                return ops->validate(val, len);

        if (ops->metatype == NLA_U32)
                ret = ife_validate_meta_u32(val, len);
        else if (ops->metatype == NLA_U16)
                ret = ife_validate_meta_u16(val, len);

        return ret;
}

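/* If no tcf_meta_ops is registered for a requested metaid, the code below
 * tries to autoload one with request_module("ifemeta%u", metaid); metadata
 * modules are expected to provide a matching "ifemeta<id>" module alias.
 * Note that ife->tcf_lock is dropped around the (sleeping) module load for
 * an existing action and re-taken afterwards.
 */
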
/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
                                void *val, int len, bool exists)
{
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops) {
                ret = -ENOENT;
#ifdef CONFIG_MODULES
                if (exists)
                        spin_unlock_bh(&ife->tcf_lock);
                rtnl_unlock();
                request_module("ifemeta%u", metaid);
                rtnl_lock();
                if (exists)
                        spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
#endif
        }

        if (ops) {
                ret = 0;
                if (len)
                        ret = ife_validate_metatype(ops, val, len);

                module_put(ops->owner);
        }

        return ret;
}

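/* add_metainfo() consumes the module reference taken by find_ife_oplist():
 * on failure it is dropped immediately, on success it is held for as long as
 * the tcf_meta_info entry sits on ife->metalist and is released again in
 * _tcf_ife_cleanup().
 */
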
/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool atomic)
{
        struct tcf_meta_info *mi = NULL;
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops)
                return -ENOENT;

        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi) {
                /*put back what find_ife_oplist took */
                module_put(ops->owner);
                return -ENOMEM;
        }

        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        module_put(ops->owner);
                        return ret;
                }
        }

        list_add_tail(&mi->metalist, &ife->metalist);

        return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife)
{
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                rc = add_metainfo(ife, o->metaid, NULL, 0, true);
                if (rc == 0)
                        installed += 1;
        }
        read_unlock(&ife_mod_lock);

        if (installed)
                return 0;
        else
                return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e;
        struct nlattr *nest;
        unsigned char *b = skb_tail_pointer(skb);
        int total_encoded = 0;

        /*can only happen on decode */
        if (list_empty(&ife->metalist))
                return 0;

        nest = nla_nest_start(skb, TCA_IFE_METALST);
        if (!nest)
                goto out_nlmsg_trim;

        list_for_each_entry(e, &ife->metalist, metalist) {
                if (!e->ops->get(skb, e))
                        total_encoded += 1;
        }

        if (!total_encoded)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        return 0;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a, int bind)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_meta_info *e, *n;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
                                e->ops->release(e);
                        else
                                kfree(e->metaval);
                }
                kfree(e);
        }
}

static void tcf_ife_cleanup(struct tc_action *a, int bind)
{
        struct tcf_ife_info *ife = to_ife(a);

        spin_lock_bh(&ife->tcf_lock);
        _tcf_ife_cleanup(a, bind);
        spin_unlock_bh(&ife->tcf_lock);
}

/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists)
{
        int len = 0;
        int rc = 0;
        int i = 0;
        void *val;

        for (i = 1; i < max_metacnt; i++) {
                if (tb[i]) {
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);

                        rc = load_metaops_and_vet(ife, i, val, len, exists);
                        if (rc != 0)
                                return rc;

                        rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
        }

        return rc;
}

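/* Userspace configures this action through the TCA_IFE_* attributes parsed
 * below.  With an ife-aware iproute2 the encode side is typically set up
 * along the lines of (illustrative command, not interpreted by this file):
 *
 *	tc filter add dev eth0 parent ffff: u32 match u32 0 0 \
 *		action ife encode allow mark dst 02:15:15:15:15:15
 *
 * which maps to IFE_ENCODE in TCA_IFE_PARMS, an allowed-metadata list in
 * TCA_IFE_METALST and a destination MAC in TCA_IFE_DMAC.
 */
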
static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
        struct tcf_ife_info *ife;
        struct tc_ife *parm;
        u16 ife_type = 0;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
        bool exists = false;
        int ret = 0;
        int err;

        err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
        if (err < 0)
                return err;

        if (!tb[TCA_IFE_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_IFE_PARMS]);

        exists = tcf_hash_check(tn, parm->index, a, bind);
        if (exists && bind)
                return 0;

        if (parm->flags & IFE_ENCODE) {
                /* Until we get issued the ethertype, we can't have
                 * a default..
                 */
                if (!tb[TCA_IFE_TYPE]) {
                        if (exists)
                                tcf_hash_release(*a, bind);
                        pr_info("You MUST pass an ethertype for encoding\n");
                        return -EINVAL;
                }
        }

        if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a, &act_ife_ops,
                                      bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                tcf_hash_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        ife = to_ife(*a);
        ife->flags = parm->flags;

        if (parm->flags & IFE_ENCODE) {
                ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
                if (tb[TCA_IFE_DMAC])
                        daddr = nla_data(tb[TCA_IFE_DMAC]);
                if (tb[TCA_IFE_SMAC])
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;

        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(ife->eth_dst, daddr);
                else
                        eth_zero_addr(ife->eth_dst);

                if (saddr)
                        ether_addr_copy(ife->eth_src, saddr);
                else
                        eth_zero_addr(ife->eth_src);

                ife->eth_type = ife_type;
        }

        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);

        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
                                       NULL);
                if (err) {
metadata_parse_err:
                        if (exists)
                                tcf_hash_release(*a, bind);
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(*a, bind);

                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }

                err = populate_metalist(ife, tb2, exists);
                if (err)
                        goto metadata_parse_err;

        } else {
                /* if no passed metadata allow list or passed allow-all
                 * then here we process by adding as many supported metadatum
                 * as we can. You better have at least one else we are
                 * going to bail out
                 */
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(*a, bind);

                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
        }

        if (exists)
                spin_unlock_bh(&ife->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, *a);

        return ret;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                        int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ife_info *ife = to_ife(a);
        struct tc_ife opt = {
                .index = ife->tcf_index,
                .refcnt = ife->tcf_refcnt - ref,
                .bindcnt = ife->tcf_bindcnt - bind,
                .action = ife->tcf_action,
                .flags = ife->flags,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &ife->tcf_tm);
        if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
                goto nla_put_failure;

        if (!is_zero_ether_addr(ife->eth_dst)) {
                if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
                        goto nla_put_failure;
        }

        if (!is_zero_ether_addr(ife->eth_src)) {
                if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
                        goto nla_put_failure;
        }

        if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
                goto nla_put_failure;

        if (dump_metalist(skb, ife)) {
                /*ignore failure to dump metalist */
                pr_info("Failed to dump metalist\n");
        }

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

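/* Decode path: tcf_ife_decode() below strips the IFE header with ife_decode()
 * and walks the contained TLVs; find_decode_metaid() matches each TLV type
 * against this action's metalist and hands the value to that metadatum's
 * ->decode() callback (typically restoring it into skb state).
 */
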
static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
                              u16 metaid, u16 mlen, void *mdata)
{
        struct tcf_meta_info *e;

        /* XXX: use hash to speed up */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (metaid == e->metaid) {
                        if (e->ops) {
                                /* We check for decode presence already */
                                return e->ops->decode(skb, mdata, mlen);
                        }
                }
        }

        return -ENOENT;
}

static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        u8 *ifehdr_end;
        u8 *tlv_data;
        u16 metalen;

        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);
        spin_unlock(&ife->tcf_lock);

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        tlv_data = ife_decode(skb, &metalen);
        if (unlikely(!tlv_data)) {
                spin_lock(&ife->tcf_lock);
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        ifehdr_end = tlv_data + metalen;
        for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
                u8 *curr_data;
                u16 dlen;
                u16 mtype;

                curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);

                if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
                        /* abuse overlimits to count when we receive metadata
                         * but dont have an ops for it
                         */
                        pr_info_ratelimited("Unknown metaid %d dlen %d\n",
                                            mtype, dlen);
                        ife->tcf_qstats.overlimits++;
                }
        }

        if (WARN_ON(tlv_data != ifehdr_end)) {
                spin_lock(&ife->tcf_lock);
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);
        skb_reset_network_header(skb);

        return action;
}

/*XXX: check if we can do this at install time instead of current
 * send data path
 **/
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e, *n;
        int tot_run_sz = 0, run_sz = 0;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                if (e->ops->check_presence) {
                        run_sz = e->ops->check_presence(skb, e);
                        tot_run_sz += run_sz;
                }
        }

        return tot_run_sz;
}

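/* The extra on-wire length accounted for in the MTU check below is the
 * per-metadatum TLV space reported by ife_get_sz() plus the fixed IFE
 * header (IFE_METAHDRLEN) plus a fresh outer link-layer header.  For
 * example, a single u32 metadatum (an 8-byte TLV, see the check helpers
 * above) on Ethernet adds 8 + IFE_METAHDRLEN + ETH_HLEN bytes.
 */
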
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ethhdr *oethh;	/* outer ether header */
        struct tcf_meta_info *e;
        /*
           OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
           where ORIGDATA = original ethernet header ...
         */
        u16 metalen = ife_get_sz(skb, ife);
        int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
        unsigned int skboff = 0;
        int new_len = skb->len + hdrm;
        bool exceed_mtu = false;
        void *ife_meta;
        int err = 0;

        if (!skb_at_tc_ingress(skb)) {
                if (new_len > skb->dev->mtu)
                        exceed_mtu = true;
        }

        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (!metalen) {		/* no metadata to send */
                /* abuse overlimits to count when we allow packet
                 * without metadata
                 */
                ife->tcf_qstats.overlimits++;
                spin_unlock(&ife->tcf_lock);
                return action;
        }
        /* could be stupid policy setup or mtu config
         * so lets be conservative..
         */
        if ((action == TC_ACT_SHOT) || exceed_mtu) {
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        ife_meta = ife_encode(skb, metalen);

        /* XXX: we dont have a clever way of telling encode to
         * not repeat some of the computations that are done by
         * ops->presence_check...
         */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (e->ops->encode) {
                        err = e->ops->encode(skb, (void *)(ife_meta + skboff),
                                             e);
                }
                if (err < 0) {
                        /* too corrupt to keep around if overwritten */
                        ife->tcf_qstats.drops++;
                        spin_unlock(&ife->tcf_lock);
                        return TC_ACT_SHOT;
                }
                skboff += err;
        }

        oethh = (struct ethhdr *)skb->data;

        if (!is_zero_ether_addr(ife->eth_src))
                ether_addr_copy(oethh->h_source, ife->eth_src);
        if (!is_zero_ether_addr(ife->eth_dst))
                ether_addr_copy(oethh->h_dest, ife->eth_dst);
        oethh->h_proto = htons(ife->eth_type);

        if (skb_at_tc_ingress(skb))
                skb_pull(skb, skb->dev->hard_header_len);

        spin_unlock(&ife->tcf_lock);

        return action;
}

static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);

        if (ife->flags & IFE_ENCODE)
                return tcf_ife_encode(skb, a, res);

        if (!(ife->flags & IFE_ENCODE))
                return tcf_ife_decode(skb, a, res);

        pr_info_ratelimited("unknown failure (policy neither de/encode)\n");
        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);
        ife->tcf_qstats.drops++;
        spin_unlock(&ife->tcf_lock);

        return TC_ACT_SHOT;
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_hash_search(tn, a, index);
}

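/* Registration glue: the ops below hook this action into the tc action
 * framework under kind "ife", dispatching configuration to tcf_ife_init() /
 * tcf_ife_dump() and packets to tcf_ife_act().
 */
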
static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .type = TCA_ACT_IFE,
        .owner = THIS_MODULE,
        .act = tcf_ife_act,
        .dump = tcf_ife_dump,
        .cleanup = tcf_ife_cleanup,
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
        .size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
}

static void __net_exit ife_exit_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        tc_action_net_exit(tn);
}

static struct pernet_operations ife_net_ops = {
        .init = ife_init_net,
        .exit = ife_exit_net,
        .id   = &ife_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
        return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
        tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");