1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/cls_flower.c Flower classifier
5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 #include <linux/bitfield.h>
16 #include <linux/if_ether.h>
17 #include <linux/in6.h>
18 #include <linux/ip.h>
19 #include <linux/mpls.h>
20 #include <linux/ppp_defs.h>
22 #include <net/sch_generic.h>
23 #include <net/pkt_cls.h>
24 #include <net/pkt_sched.h>
25 #include <net/ip.h>
26 #include <net/flow_dissector.h>
27 #include <net/geneve.h>
28 #include <net/vxlan.h>
29 #include <net/erspan.h>
30 #include <net/gtp.h>
31 #include <net/pfcp.h>
32 #include <net/tc_wrapper.h>
34 #include <net/dst.h>
35 #include <net/dst_metadata.h>
37 #include <uapi/linux/netfilter/nf_conntrack_common.h>
39 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
40 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
41 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
42 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
44 #define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
45 (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
46 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)
48 #define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
49 (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
50 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
51 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
52 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
54 struct fl_flow_key {
55 struct flow_dissector_key_meta meta;
56 struct flow_dissector_key_control control;
57 struct flow_dissector_key_control enc_control;
58 struct flow_dissector_key_basic basic;
59 struct flow_dissector_key_eth_addrs eth;
60 struct flow_dissector_key_vlan vlan;
61 struct flow_dissector_key_vlan cvlan;
62 union {
63 struct flow_dissector_key_ipv4_addrs ipv4;
64 struct flow_dissector_key_ipv6_addrs ipv6;
66 struct flow_dissector_key_ports tp;
67 struct flow_dissector_key_icmp icmp;
68 struct flow_dissector_key_arp arp;
69 struct flow_dissector_key_keyid enc_key_id;
70 union {
71 struct flow_dissector_key_ipv4_addrs enc_ipv4;
72 struct flow_dissector_key_ipv6_addrs enc_ipv6;
74 struct flow_dissector_key_ports enc_tp;
75 struct flow_dissector_key_mpls mpls;
76 struct flow_dissector_key_tcp tcp;
77 struct flow_dissector_key_ip ip;
78 struct flow_dissector_key_ip enc_ip;
79 struct flow_dissector_key_enc_opts enc_opts;
80 struct flow_dissector_key_ports_range tp_range;
81 struct flow_dissector_key_ct ct;
82 struct flow_dissector_key_hash hash;
83 struct flow_dissector_key_num_of_vlans num_of_vlans;
84 struct flow_dissector_key_pppoe pppoe;
85 struct flow_dissector_key_l2tpv3 l2tpv3;
86 struct flow_dissector_key_ipsec ipsec;
87 struct flow_dissector_key_cfm cfm;
88 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
90 struct fl_flow_mask_range {
91 unsigned short int start;
92 unsigned short int end;
95 struct fl_flow_mask {
96 struct fl_flow_key key;
97 struct fl_flow_mask_range range;
98 u32 flags;
99 struct rhash_head ht_node;
100 struct rhashtable ht;
101 struct rhashtable_params filter_ht_params;
102 struct flow_dissector dissector;
103 struct list_head filters;
104 struct rcu_work rwork;
105 struct list_head list;
106 refcount_t refcnt;
109 struct fl_flow_tmplt {
110 struct fl_flow_key dummy_key;
111 struct fl_flow_key mask;
112 struct flow_dissector dissector;
113 struct tcf_chain *chain;
116 struct cls_fl_head {
117 struct rhashtable ht;
118 spinlock_t masks_lock; /* Protect masks list */
119 struct list_head masks;
120 struct list_head hw_filters;
121 struct rcu_work rwork;
122 struct idr handle_idr;
125 struct cls_fl_filter {
126 struct fl_flow_mask *mask;
127 struct rhash_head ht_node;
128 struct fl_flow_key mkey;
129 struct tcf_exts exts;
130 struct tcf_result res;
131 struct fl_flow_key key;
132 struct list_head list;
133 struct list_head hw_list;
134 u32 handle;
135 u32 flags;
136 u32 in_hw_count;
137 u8 needs_tc_skb_ext:1;
138 struct rcu_work rwork;
139 struct net_device *hw_dev;
140 /* Flower classifier is unlocked, which means that its reference counter
141 * can be changed concurrently without any kind of external
142 * synchronization. Use atomic reference counter to be concurrency-safe.
143 */
144 refcount_t refcnt;
145 bool deleted;
148 static const struct rhashtable_params mask_ht_params = {
149 .key_offset = offsetof(struct fl_flow_mask, key),
150 .key_len = sizeof(struct fl_flow_key),
151 .head_offset = offsetof(struct fl_flow_mask, ht_node),
152 .automatic_shrinking = true,
155 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
157 return mask->range.end - mask->range.start;
160 static void fl_mask_update_range(struct fl_flow_mask *mask)
162 const u8 *bytes = (const u8 *) &mask->key;
163 size_t size = sizeof(mask->key);
164 size_t i, first = 0, last;
166 for (i = 0; i < size; i++) {
167 if (bytes[i]) {
168 first = i;
169 break;
172 last = first;
173 for (i = size - 1; i != first; i--) {
174 if (bytes[i]) {
175 last = i;
176 break;
179 mask->range.start = rounddown(first, sizeof(long));
180 mask->range.end = roundup(last + 1, sizeof(long));
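/* A worked example of the range computation above (illustrative, assuming a
 * 64-bit long): if the first non-zero byte of the mask sits at offset 13 and
 * the last at offset 22, the range becomes [8, 24), so later masked-key
 * operations touch only two longs instead of the whole struct fl_flow_key.
 */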
183 static void *fl_key_get_start(struct fl_flow_key *key,
184 const struct fl_flow_mask *mask)
186 return (u8 *) key + mask->range.start;
189 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
190 struct fl_flow_mask *mask)
192 const long *lkey = fl_key_get_start(key, mask);
193 const long *lmask = fl_key_get_start(&mask->key, mask);
194 long *lmkey = fl_key_get_start(mkey, mask);
195 int i;
197 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
198 *lmkey++ = *lkey++ & *lmask++;
201 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
202 struct fl_flow_mask *mask)
204 const long *lmask = fl_key_get_start(&mask->key, mask);
205 const long *ltmplt;
206 int i;
208 if (!tmplt)
209 return true;
210 ltmplt = fl_key_get_start(&tmplt->mask, mask);
211 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
212 if (~*ltmplt++ & *lmask++)
213 return false;
215 return true;
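/* Put differently (illustrative note): a mask "fits" a template when it does
 * not enable any bit the template mask leaves clear, i.e. ~tmplt & mask == 0
 * for every long word in the masked range. A template that only masks
 * eth.dst therefore accepts filters matching on eth.dst (or a subset of its
 * bits) and rejects filters that additionally mask, say, eth.src.
 */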
218 static void fl_clear_masked_range(struct fl_flow_key *key,
219 struct fl_flow_mask *mask)
221 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
224 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
225 struct fl_flow_key *key,
226 struct fl_flow_key *mkey)
228 u16 min_mask, max_mask, min_val, max_val;
230 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
231 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
232 min_val = ntohs(filter->key.tp_range.tp_min.dst);
233 max_val = ntohs(filter->key.tp_range.tp_max.dst);
235 if (min_mask && max_mask) {
236 if (ntohs(key->tp_range.tp.dst) < min_val ||
237 ntohs(key->tp_range.tp.dst) > max_val)
238 return false;
240 /* skb does not have min and max values */
241 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
242 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
244 return true;
247 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
248 struct fl_flow_key *key,
249 struct fl_flow_key *mkey)
251 u16 min_mask, max_mask, min_val, max_val;
253 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
254 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
255 min_val = ntohs(filter->key.tp_range.tp_min.src);
256 max_val = ntohs(filter->key.tp_range.tp_max.src);
258 if (min_mask && max_mask) {
259 if (ntohs(key->tp_range.tp.src) < min_val ||
260 ntohs(key->tp_range.tp.src) > max_val)
261 return false;
263 /* skb does not have min and max values */
264 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
265 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
267 return true;
270 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
271 struct fl_flow_key *mkey)
273 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
274 mask->filter_ht_params);
277 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
278 struct fl_flow_key *mkey,
279 struct fl_flow_key *key)
281 struct cls_fl_filter *filter, *f;
283 list_for_each_entry_rcu(filter, &mask->filters, list) {
284 if (!fl_range_port_dst_cmp(filter, key, mkey))
285 continue;
287 if (!fl_range_port_src_cmp(filter, key, mkey))
288 continue;
290 f = __fl_lookup(mask, mkey);
291 if (f)
292 return f;
294 return NULL;
297 static noinline_for_stack
298 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
300 struct fl_flow_key mkey;
302 fl_set_masked_key(&mkey, key, mask);
303 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
304 return fl_lookup_range(mask, &mkey, key);
306 return __fl_lookup(mask, &mkey);
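/* Lookup sketch (illustrative): fl_classify() below walks every mask on
 * head->masks, builds the masked key for that mask and probes the mask's own
 * hash table. Port-range filters cannot be found by hashing alone, so
 * fl_lookup_range() first checks the range and copies the candidate filter's
 * min/max port bytes into the masked key before retrying the hash lookup.
 */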
309 static u16 fl_ct_info_to_flower_map[] = {
310 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
311 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
312 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
313 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
314 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
315 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
316 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
317 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
318 TCA_FLOWER_KEY_CT_FLAGS_RELATED |
319 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
320 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
321 TCA_FLOWER_KEY_CT_FLAGS_NEW,
324 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
325 const struct tcf_proto *tp,
326 struct tcf_result *res)
328 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
329 bool post_ct = tc_skb_cb(skb)->post_ct;
330 u16 zone = tc_skb_cb(skb)->zone;
331 struct fl_flow_key skb_key;
332 struct fl_flow_mask *mask;
333 struct cls_fl_filter *f;
335 list_for_each_entry_rcu(mask, &head->masks, list) {
336 flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
337 fl_clear_masked_range(&skb_key, mask);
339 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
340 /* skb_flow_dissect() does not set n_proto in case of an unknown
341 * protocol, so set it here instead.
342 */
343 skb_key.basic.n_proto = skb_protocol(skb, false);
344 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
345 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
346 fl_ct_info_to_flower_map,
347 ARRAY_SIZE(fl_ct_info_to_flower_map),
348 post_ct, zone);
349 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
350 skb_flow_dissect(skb, &mask->dissector, &skb_key,
351 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
353 f = fl_mask_lookup(mask, &skb_key);
354 if (f && !tc_skip_sw(f->flags)) {
355 *res = f->res;
356 return tcf_exts_exec(skb, &f->exts, res);
359 return -1;
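/* Note (illustrative): the -1 above is TC_ACT_UNSPEC, i.e. "no software
 * match here"; the caller then falls through to the next classifier on the
 * chain, if any. Filters with TCA_CLS_FLAGS_SKIP_SW are deliberately not
 * executed in this path.
 */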
362 static int fl_init(struct tcf_proto *tp)
364 struct cls_fl_head *head;
366 head = kzalloc(sizeof(*head), GFP_KERNEL);
367 if (!head)
368 return -ENOBUFS;
370 spin_lock_init(&head->masks_lock);
371 INIT_LIST_HEAD_RCU(&head->masks);
372 INIT_LIST_HEAD(&head->hw_filters);
373 rcu_assign_pointer(tp->root, head);
374 idr_init(&head->handle_idr);
376 return rhashtable_init(&head->ht, &mask_ht_params);
379 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
381 /* temporary masks don't have their filters list and ht initialized */
382 if (mask_init_done) {
383 WARN_ON(!list_empty(&mask->filters));
384 rhashtable_destroy(&mask->ht);
386 kfree(mask);
389 static void fl_mask_free_work(struct work_struct *work)
391 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
392 struct fl_flow_mask, rwork);
394 fl_mask_free(mask, true);
397 static void fl_uninit_mask_free_work(struct work_struct *work)
399 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
400 struct fl_flow_mask, rwork);
402 fl_mask_free(mask, false);
405 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
407 if (!refcount_dec_and_test(&mask->refcnt))
408 return false;
410 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
412 spin_lock(&head->masks_lock);
413 list_del_rcu(&mask->list);
414 spin_unlock(&head->masks_lock);
416 tcf_queue_work(&mask->rwork, fl_mask_free_work);
418 return true;
421 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
423 /* Flower classifier only changes root pointer during init and destroy.
424 * Users must obtain reference to tcf_proto instance before calling its
425 * API, so tp->root pointer is protected from concurrent call to
426 * fl_destroy() by reference counting.
427 */
428 return rcu_dereference_raw(tp->root);
431 static void __fl_destroy_filter(struct cls_fl_filter *f)
433 if (f->needs_tc_skb_ext)
434 tc_skb_ext_tc_disable();
435 tcf_exts_destroy(&f->exts);
436 tcf_exts_put_net(&f->exts);
437 kfree(f);
440 static void fl_destroy_filter_work(struct work_struct *work)
442 struct cls_fl_filter *f = container_of(to_rcu_work(work),
443 struct cls_fl_filter, rwork);
445 __fl_destroy_filter(f);
448 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
449 bool rtnl_held, struct netlink_ext_ack *extack)
451 struct tcf_block *block = tp->chain->block;
452 struct flow_cls_offload cls_flower = {};
454 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
455 cls_flower.command = FLOW_CLS_DESTROY;
456 cls_flower.cookie = (unsigned long) f;
458 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
459 &f->flags, &f->in_hw_count, rtnl_held);
463 static int fl_hw_replace_filter(struct tcf_proto *tp,
464 struct cls_fl_filter *f, bool rtnl_held,
465 struct netlink_ext_ack *extack)
467 struct tcf_block *block = tp->chain->block;
468 struct flow_cls_offload cls_flower = {};
469 bool skip_sw = tc_skip_sw(f->flags);
470 int err = 0;
472 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
473 if (!cls_flower.rule)
474 return -ENOMEM;
476 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
477 cls_flower.command = FLOW_CLS_REPLACE;
478 cls_flower.cookie = (unsigned long) f;
479 cls_flower.rule->match.dissector = &f->mask->dissector;
480 cls_flower.rule->match.mask = &f->mask->key;
481 cls_flower.rule->match.key = &f->mkey;
482 cls_flower.classid = f->res.classid;
484 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
485 cls_flower.common.extack);
486 if (err) {
487 kfree(cls_flower.rule);
489 return skip_sw ? err : 0;
492 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
493 skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
494 tc_cleanup_offload_action(&cls_flower.rule->action);
495 kfree(cls_flower.rule);
497 if (err) {
498 fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
499 return err;
502 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
503 return -EINVAL;
505 return 0;
508 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
509 bool rtnl_held)
511 struct tcf_block *block = tp->chain->block;
512 struct flow_cls_offload cls_flower = {};
514 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
515 cls_flower.command = FLOW_CLS_STATS;
516 cls_flower.cookie = (unsigned long) f;
517 cls_flower.classid = f->res.classid;
519 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
520 rtnl_held);
522 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
525 static void __fl_put(struct cls_fl_filter *f)
527 if (!refcount_dec_and_test(&f->refcnt))
528 return;
530 if (tcf_exts_get_net(&f->exts))
531 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
532 else
533 __fl_destroy_filter(f);
536 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
538 struct cls_fl_filter *f;
540 rcu_read_lock();
541 f = idr_find(&head->handle_idr, handle);
542 if (f && !refcount_inc_not_zero(&f->refcnt))
543 f = NULL;
544 rcu_read_unlock();
546 return f;
549 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
551 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
552 struct cls_fl_filter *f;
554 f = idr_find(&head->handle_idr, handle);
555 return f ? &f->exts : NULL;
558 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
559 bool *last, bool rtnl_held,
560 struct netlink_ext_ack *extack)
562 struct cls_fl_head *head = fl_head_dereference(tp);
564 *last = false;
566 spin_lock(&tp->lock);
567 if (f->deleted) {
568 spin_unlock(&tp->lock);
569 return -ENOENT;
572 f->deleted = true;
573 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
574 f->mask->filter_ht_params);
575 idr_remove(&head->handle_idr, f->handle);
576 list_del_rcu(&f->list);
577 spin_unlock(&tp->lock);
579 *last = fl_mask_put(head, f->mask);
580 if (!tc_skip_hw(f->flags))
581 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
582 tcf_unbind_filter(tp, &f->res);
583 __fl_put(f);
585 return 0;
588 static void fl_destroy_sleepable(struct work_struct *work)
590 struct cls_fl_head *head = container_of(to_rcu_work(work),
591 struct cls_fl_head,
592 rwork);
594 rhashtable_destroy(&head->ht);
595 kfree(head);
596 module_put(THIS_MODULE);
599 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
600 struct netlink_ext_ack *extack)
602 struct cls_fl_head *head = fl_head_dereference(tp);
603 struct fl_flow_mask *mask, *next_mask;
604 struct cls_fl_filter *f, *next;
605 bool last;
607 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
608 list_for_each_entry_safe(f, next, &mask->filters, list) {
609 __fl_delete(tp, f, &last, rtnl_held, extack);
610 if (last)
611 break;
614 idr_destroy(&head->handle_idr);
616 __module_get(THIS_MODULE);
617 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
620 static void fl_put(struct tcf_proto *tp, void *arg)
622 struct cls_fl_filter *f = arg;
624 __fl_put(f);
627 static void *fl_get(struct tcf_proto *tp, u32 handle)
629 struct cls_fl_head *head = fl_head_dereference(tp);
631 return __fl_get(head, handle);
634 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
635 [TCA_FLOWER_UNSPEC] = { .strict_start_type =
636 TCA_FLOWER_L2_MISS },
637 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
638 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
639 .len = IFNAMSIZ },
640 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
641 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
642 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
643 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
644 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
645 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
646 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
647 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
648 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
649 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
650 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
651 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
652 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
653 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
654 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
656 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
657 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
660 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
662 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
663 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
664 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
665 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
666 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
667 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
668 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
669 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
670 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
671 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
672 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
673 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
674 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
675 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
676 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
677 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
678 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
679 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
680 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
681 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
682 [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
683 TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
684 [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
685 TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
686 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
688 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
689 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
690 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
694 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
695 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
696 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
697 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
698 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
700 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
701 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
702 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
703 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
704 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
705 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
706 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
707 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
708 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
709 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
710 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
711 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
712 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
713 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
714 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
715 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
716 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
717 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
718 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
719 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
720 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
721 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
722 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
723 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
724 [TCA_FLOWER_KEY_CT_STATE] =
725 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
726 [TCA_FLOWER_KEY_CT_STATE_MASK] =
727 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
728 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
729 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
730 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
731 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
732 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
733 .len = 128 / BITS_PER_BYTE },
734 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
735 .len = 128 / BITS_PER_BYTE },
736 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
737 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
738 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
739 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
740 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
741 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
742 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
743 [TCA_FLOWER_KEY_SPI] = { .type = NLA_U32 },
744 [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 },
745 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
746 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
747 [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
748 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
749 [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
750 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
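/* For orientation (illustrative; exact iproute2 syntax varies by version),
 * a request exercising a few of the attributes validated above could be:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 skip_hw action drop
 *
 * which roughly maps to TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO,
 * TCA_FLOWER_KEY_TCP_DST and TCA_FLOWER_FLAGS in the netlink message parsed
 * by this classifier.
 */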
753 static const struct nla_policy
754 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
755 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
756 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
757 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
758 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
759 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
760 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
761 [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED },
764 static const struct nla_policy
765 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
766 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
767 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
768 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
769 .len = 128 },
772 static const struct nla_policy
773 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
774 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
777 static const struct nla_policy
778 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
779 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
780 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
781 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
782 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
785 static const struct nla_policy
786 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
787 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
788 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
791 static const struct nla_policy
792 pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
793 [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 },
794 [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 },
797 static const struct nla_policy
798 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
799 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
800 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
801 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
802 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
803 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
806 static const struct nla_policy
807 cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
808 [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8,
809 FLOW_DIS_CFM_MDL_MAX),
810 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 },
813 static void fl_set_key_val(struct nlattr **tb,
814 void *val, int val_type,
815 void *mask, int mask_type, int len)
817 if (!tb[val_type])
818 return;
819 nla_memcpy(val, tb[val_type], len);
820 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
821 memset(mask, 0xff, len);
822 else
823 nla_memcpy(mask, tb[mask_type], len);
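/* Convention worth spelling out (illustrative note): when a value attribute
 * is present but its mask attribute is absent, or the caller passes
 * TCA_FLOWER_UNSPEC as mask_type, the mask is filled with 0xff bytes and the
 * field is matched exactly.
 */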
826 static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
827 struct fl_flow_key *mask,
828 struct netlink_ext_ack *extack)
830 if (key->basic.ip_proto != IPPROTO_ESP &&
831 key->basic.ip_proto != IPPROTO_AH) {
832 NL_SET_ERR_MSG(extack,
833 "Protocol must be either ESP or AH");
834 return -EINVAL;
837 fl_set_key_val(tb, &key->ipsec.spi,
838 TCA_FLOWER_KEY_SPI,
839 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
840 sizeof(key->ipsec.spi));
841 return 0;
844 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
845 struct fl_flow_key *mask,
846 struct netlink_ext_ack *extack)
848 fl_set_key_val(tb, &key->tp_range.tp_min.dst,
849 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
850 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
851 fl_set_key_val(tb, &key->tp_range.tp_max.dst,
852 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
853 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
854 fl_set_key_val(tb, &key->tp_range.tp_min.src,
855 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
856 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
857 fl_set_key_val(tb, &key->tp_range.tp_max.src,
858 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
859 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
861 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
862 NL_SET_ERR_MSG(extack,
863 "Both min and max destination ports must be specified");
864 return -EINVAL;
866 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
867 NL_SET_ERR_MSG(extack,
868 "Both min and max source ports must be specified");
869 return -EINVAL;
871 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
872 ntohs(key->tp_range.tp_max.dst) <=
873 ntohs(key->tp_range.tp_min.dst)) {
874 NL_SET_ERR_MSG_ATTR(extack,
875 tb[TCA_FLOWER_KEY_PORT_DST_MIN],
876 "Invalid destination port range (min must be strictly smaller than max)");
877 return -EINVAL;
879 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
880 ntohs(key->tp_range.tp_max.src) <=
881 ntohs(key->tp_range.tp_min.src)) {
882 NL_SET_ERR_MSG_ATTR(extack,
883 tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
884 "Invalid source port range (min must be strictly smaller than max)");
885 return -EINVAL;
888 return 0;
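/* Example of the checks above (illustrative): a dst_port 1000-2000 range
 * must carry both TCA_FLOWER_KEY_PORT_DST_MIN and TCA_FLOWER_KEY_PORT_DST_MAX
 * and passes since 1000 < 2000; ranges such as 2000-2000 or 3000-2000 fail
 * the strict min < max check and are rejected with -EINVAL.
 */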
891 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
892 struct flow_dissector_key_mpls *key_val,
893 struct flow_dissector_key_mpls *key_mask,
894 struct netlink_ext_ack *extack)
896 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
897 struct flow_dissector_mpls_lse *lse_mask;
898 struct flow_dissector_mpls_lse *lse_val;
899 u8 lse_index;
900 u8 depth;
901 int err;
903 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
904 mpls_stack_entry_policy, extack);
905 if (err < 0)
906 return err;
908 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
909 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
910 return -EINVAL;
913 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
915 /* LSE depth starts at 1, for consistency with terminology used by
916 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
917 */
918 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
919 NL_SET_ERR_MSG_ATTR(extack,
920 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
921 "Invalid MPLS depth");
922 return -EINVAL;
924 lse_index = depth - 1;
926 dissector_set_mpls_lse(key_val, lse_index);
927 dissector_set_mpls_lse(key_mask, lse_index);
929 lse_val = &key_val->ls[lse_index];
930 lse_mask = &key_mask->ls[lse_index];
932 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
933 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
934 lse_mask->mpls_ttl = MPLS_TTL_MASK;
936 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
937 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
939 if (bos & ~MPLS_BOS_MASK) {
940 NL_SET_ERR_MSG_ATTR(extack,
941 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
942 "Bottom Of Stack (BOS) must be 0 or 1");
943 return -EINVAL;
945 lse_val->mpls_bos = bos;
946 lse_mask->mpls_bos = MPLS_BOS_MASK;
948 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
949 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
951 if (tc & ~MPLS_TC_MASK) {
952 NL_SET_ERR_MSG_ATTR(extack,
953 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
954 "Traffic Class (TC) must be between 0 and 7");
955 return -EINVAL;
957 lse_val->mpls_tc = tc;
958 lse_mask->mpls_tc = MPLS_TC_MASK;
960 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
961 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
963 if (label & ~MPLS_LABEL_MASK) {
964 NL_SET_ERR_MSG_ATTR(extack,
965 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
966 "Label must be between 0 and 1048575");
967 return -EINVAL;
969 lse_val->mpls_label = label;
970 lse_mask->mpls_label = MPLS_LABEL_MASK;
973 return 0;
976 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
977 struct flow_dissector_key_mpls *key_val,
978 struct flow_dissector_key_mpls *key_mask,
979 struct netlink_ext_ack *extack)
981 struct nlattr *nla_lse;
982 int rem;
983 int err;
985 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
986 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
987 "NLA_F_NESTED is missing");
988 return -EINVAL;
991 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
992 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
993 NL_SET_ERR_MSG_ATTR(extack, nla_lse,
994 "Invalid MPLS option type");
995 return -EINVAL;
998 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
999 if (err < 0)
1000 return err;
1002 if (rem) {
1003 NL_SET_ERR_MSG(extack,
1004 "Bytes leftover after parsing MPLS options");
1005 return -EINVAL;
1008 return 0;
1011 static int fl_set_key_mpls(struct nlattr **tb,
1012 struct flow_dissector_key_mpls *key_val,
1013 struct flow_dissector_key_mpls *key_mask,
1014 struct netlink_ext_ack *extack)
1016 struct flow_dissector_mpls_lse *lse_mask;
1017 struct flow_dissector_mpls_lse *lse_val;
1019 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
1020 if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
1021 tb[TCA_FLOWER_KEY_MPLS_BOS] ||
1022 tb[TCA_FLOWER_KEY_MPLS_TC] ||
1023 tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1024 NL_SET_ERR_MSG_ATTR(extack,
1025 tb[TCA_FLOWER_KEY_MPLS_OPTS],
1026 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
1027 return -EBADMSG;
1030 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
1031 key_val, key_mask, extack);
1034 lse_val = &key_val->ls[0];
1035 lse_mask = &key_mask->ls[0];
1037 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
1038 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
1039 lse_mask->mpls_ttl = MPLS_TTL_MASK;
1040 dissector_set_mpls_lse(key_val, 0);
1041 dissector_set_mpls_lse(key_mask, 0);
1043 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
1044 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
1046 if (bos & ~MPLS_BOS_MASK) {
1047 NL_SET_ERR_MSG_ATTR(extack,
1048 tb[TCA_FLOWER_KEY_MPLS_BOS],
1049 "Bottom Of Stack (BOS) must be 0 or 1");
1050 return -EINVAL;
1052 lse_val->mpls_bos = bos;
1053 lse_mask->mpls_bos = MPLS_BOS_MASK;
1054 dissector_set_mpls_lse(key_val, 0);
1055 dissector_set_mpls_lse(key_mask, 0);
1057 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
1058 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
1060 if (tc & ~MPLS_TC_MASK) {
1061 NL_SET_ERR_MSG_ATTR(extack,
1062 tb[TCA_FLOWER_KEY_MPLS_TC],
1063 "Traffic Class (TC) must be between 0 and 7");
1064 return -EINVAL;
1066 lse_val->mpls_tc = tc;
1067 lse_mask->mpls_tc = MPLS_TC_MASK;
1068 dissector_set_mpls_lse(key_val, 0);
1069 dissector_set_mpls_lse(key_mask, 0);
1071 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1072 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1074 if (label & ~MPLS_LABEL_MASK) {
1075 NL_SET_ERR_MSG_ATTR(extack,
1076 tb[TCA_FLOWER_KEY_MPLS_LABEL],
1077 "Label must be between 0 and 1048575");
1078 return -EINVAL;
1080 lse_val->mpls_label = label;
1081 lse_mask->mpls_label = MPLS_LABEL_MASK;
1082 dissector_set_mpls_lse(key_val, 0);
1083 dissector_set_mpls_lse(key_mask, 0);
1085 return 0;
1088 static void fl_set_key_vlan(struct nlattr **tb,
1089 __be16 ethertype,
1090 int vlan_id_key, int vlan_prio_key,
1091 int vlan_next_eth_type_key,
1092 struct flow_dissector_key_vlan *key_val,
1093 struct flow_dissector_key_vlan *key_mask)
1095 #define VLAN_PRIORITY_MASK 0x7
1097 if (tb[vlan_id_key]) {
1098 key_val->vlan_id =
1099 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1100 key_mask->vlan_id = VLAN_VID_MASK;
1102 if (tb[vlan_prio_key]) {
1103 key_val->vlan_priority =
1104 nla_get_u8(tb[vlan_prio_key]) &
1105 VLAN_PRIORITY_MASK;
1106 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1108 if (ethertype) {
1109 key_val->vlan_tpid = ethertype;
1110 key_mask->vlan_tpid = cpu_to_be16(~0);
1112 if (tb[vlan_next_eth_type_key]) {
1113 key_val->vlan_eth_type =
1114 nla_get_be16(tb[vlan_next_eth_type_key]);
1115 key_mask->vlan_eth_type = cpu_to_be16(~0);
1119 static void fl_set_key_pppoe(struct nlattr **tb,
1120 struct flow_dissector_key_pppoe *key_val,
1121 struct flow_dissector_key_pppoe *key_mask,
1122 struct fl_flow_key *key,
1123 struct fl_flow_key *mask)
1125 /* key_val::type must be set to ETH_P_PPP_SES
1126 * because ETH_P_PPP_SES was stored in basic.n_proto
1127 * which might get overwritten by ppp_proto
1128 * or might be set to 0, the role of key_val::type
1129 * is similar to vlan_key::tpid
1130 */
1131 key_val->type = htons(ETH_P_PPP_SES);
1132 key_mask->type = cpu_to_be16(~0);
1134 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1135 key_val->session_id =
1136 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1137 key_mask->session_id = cpu_to_be16(~0);
1139 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1140 key_val->ppp_proto =
1141 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1142 key_mask->ppp_proto = cpu_to_be16(~0);
1144 if (key_val->ppp_proto == htons(PPP_IP)) {
1145 key->basic.n_proto = htons(ETH_P_IP);
1146 mask->basic.n_proto = cpu_to_be16(~0);
1147 } else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1148 key->basic.n_proto = htons(ETH_P_IPV6);
1149 mask->basic.n_proto = cpu_to_be16(~0);
1150 } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1151 key->basic.n_proto = htons(ETH_P_MPLS_UC);
1152 mask->basic.n_proto = cpu_to_be16(~0);
1153 } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1154 key->basic.n_proto = htons(ETH_P_MPLS_MC);
1155 mask->basic.n_proto = cpu_to_be16(~0);
1157 } else {
1158 key->basic.n_proto = 0;
1159 mask->basic.n_proto = cpu_to_be16(0);
1163 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1164 u32 *dissector_key, u32 *dissector_mask,
1165 u32 flower_flag_bit, u32 dissector_flag_bit)
1167 if (flower_mask & flower_flag_bit) {
1168 *dissector_mask |= dissector_flag_bit;
1169 if (flower_key & flower_flag_bit)
1170 *dissector_key |= dissector_flag_bit;
1174 static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
1175 bool encap, u32 *flags_key, u32 *flags_mask,
1176 struct netlink_ext_ack *extack)
1178 int fl_key, fl_mask;
1179 u32 key, mask;
1181 if (encap) {
1182 fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
1183 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
1184 } else {
1185 fl_key = TCA_FLOWER_KEY_FLAGS;
1186 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
1189 /* mask is mandatory for flags */
1190 if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
1191 NL_SET_ERR_MSG(extack, "Missing flags mask");
1192 return -EINVAL;
1195 key = be32_to_cpu(nla_get_be32(tb[fl_key]));
1196 mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));
1198 *flags_key = 0;
1199 *flags_mask = 0;
1201 fl_set_key_flag(key, mask, flags_key, flags_mask,
1202 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1203 fl_set_key_flag(key, mask, flags_key, flags_mask,
1204 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1205 FLOW_DIS_FIRST_FRAG);
1207 fl_set_key_flag(key, mask, flags_key, flags_mask,
1208 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
1209 FLOW_DIS_F_TUNNEL_CSUM);
1211 fl_set_key_flag(key, mask, flags_key, flags_mask,
1212 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
1213 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
1215 fl_set_key_flag(key, mask, flags_key, flags_mask,
1216 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
1218 fl_set_key_flag(key, mask, flags_key, flags_mask,
1219 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
1220 FLOW_DIS_F_TUNNEL_CRIT_OPT);
1222 return 0;
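/* Translation example (illustrative): TCA_FLOWER_KEY_FLAGS = frag with
 * TCA_FLOWER_KEY_FLAGS_MASK = frag|firstfrag yields
 * flags_key = FLOW_DIS_IS_FRAGMENT and
 * flags_mask = FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG, i.e. match
 * fragments that are not the first fragment.
 */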
1225 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1226 struct flow_dissector_key_ip *key,
1227 struct flow_dissector_key_ip *mask)
1229 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1230 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1231 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1232 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1234 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1235 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1238 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1239 int depth, int option_len,
1240 struct netlink_ext_ack *extack)
1242 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1243 struct nlattr *class = NULL, *type = NULL, *data = NULL;
1244 struct geneve_opt *opt;
1245 int err, data_len = 0;
1247 if (option_len > sizeof(struct geneve_opt))
1248 data_len = option_len - sizeof(struct geneve_opt);
1250 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1251 return -ERANGE;
1253 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1254 memset(opt, 0xff, option_len);
1255 opt->length = data_len / 4;
1256 opt->r1 = 0;
1257 opt->r2 = 0;
1258 opt->r3 = 0;
1260 /* If no mask has been provided we assume an exact match. */
1261 if (!depth)
1262 return sizeof(struct geneve_opt) + data_len;
1264 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1265 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1266 return -EINVAL;
1269 err = nla_parse_nested_deprecated(tb,
1270 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1271 nla, geneve_opt_policy, extack);
1272 if (err < 0)
1273 return err;
1275 /* We are not allowed to omit any of CLASS, TYPE or DATA
1276 * fields from the key.
1277 */
1278 if (!option_len &&
1279 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1280 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1281 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1282 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1283 return -EINVAL;
1286 /* Omitting any of CLASS, TYPE or DATA fields is allowed
1287 * for the mask.
1288 */
1289 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1290 int new_len = key->enc_opts.len;
1292 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1293 data_len = nla_len(data);
1294 if (data_len < 4) {
1295 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1296 return -ERANGE;
1298 if (data_len % 4) {
1299 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1300 return -ERANGE;
1303 new_len += sizeof(struct geneve_opt) + data_len;
1304 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1305 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1306 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1307 return -ERANGE;
1309 opt->length = data_len / 4;
1310 memcpy(opt->opt_data, nla_data(data), data_len);
1313 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1314 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1315 opt->opt_class = nla_get_be16(class);
1318 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1319 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1320 opt->type = nla_get_u8(type);
1323 return sizeof(struct geneve_opt) + data_len;
1326 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1327 int depth, int option_len,
1328 struct netlink_ext_ack *extack)
1330 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1331 struct vxlan_metadata *md;
1332 int err;
1334 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1335 memset(md, 0xff, sizeof(*md));
1337 if (!depth)
1338 return sizeof(*md);
1340 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1341 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1342 return -EINVAL;
1345 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1346 vxlan_opt_policy, extack);
1347 if (err < 0)
1348 return err;
1350 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1351 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1352 return -EINVAL;
1355 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1356 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1357 md->gbp &= VXLAN_GBP_MASK;
1360 return sizeof(*md);
1363 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1364 int depth, int option_len,
1365 struct netlink_ext_ack *extack)
1367 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1368 struct erspan_metadata *md;
1369 int err;
1371 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1372 md->version = 1;
1374 if (!depth)
1375 return sizeof(*md);
1377 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1378 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1379 return -EINVAL;
1382 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1383 erspan_opt_policy, extack);
1384 if (err < 0)
1385 return err;
1387 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1388 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1389 return -EINVAL;
1392 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1393 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1395 if (md->version == 1) {
1396 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1397 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1398 return -EINVAL;
1400 memset(&md->u.index, 0xff, sizeof(md->u.index));
1401 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1402 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1403 md->u.index = nla_get_be32(nla);
1405 } else if (md->version == 2) {
1406 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1407 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1408 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1409 return -EINVAL;
1411 md->u.md2.dir = 1;
1412 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1413 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1414 md->u.md2.dir = nla_get_u8(nla);
1416 set_hwid(&md->u.md2, 0xff);
1417 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1418 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1419 set_hwid(&md->u.md2, nla_get_u8(nla));
1421 } else {
1422 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1423 return -EINVAL;
1426 return sizeof(*md);
1429 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1430 int depth, int option_len,
1431 struct netlink_ext_ack *extack)
1433 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1434 struct gtp_pdu_session_info *sinfo;
1435 u8 len = key->enc_opts.len;
1436 int err;
1438 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1439 memset(sinfo, 0xff, option_len);
1441 if (!depth)
1442 return sizeof(*sinfo);
1444 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1445 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1446 return -EINVAL;
1449 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1450 gtp_opt_policy, extack);
1451 if (err < 0)
1452 return err;
1454 if (!option_len &&
1455 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1456 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1457 NL_SET_ERR_MSG_MOD(extack,
1458 "Missing tunnel key gtp option pdu type or qfi");
1459 return -EINVAL;
1462 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1463 sinfo->pdu_type =
1464 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1466 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1467 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1469 return sizeof(*sinfo);
1472 static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1473 int depth, int option_len,
1474 struct netlink_ext_ack *extack)
1476 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
1477 struct pfcp_metadata *md;
1478 int err;
1480 md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
1481 memset(md, 0xff, sizeof(*md));
1483 if (!depth)
1484 return sizeof(*md);
1486 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
1487 NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
1488 return -EINVAL;
1491 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
1492 pfcp_opt_policy, extack);
1493 if (err < 0)
1494 return err;
1496 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
1497 NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
1498 return -EINVAL;
1501 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
1502 md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);
1504 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
1505 md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);
1507 return sizeof(*md);
1510 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1511 struct fl_flow_key *mask,
1512 struct netlink_ext_ack *extack)
1514 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1515 int err, option_len, key_depth, msk_depth = 0;
1517 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1518 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1519 enc_opts_policy, extack);
1520 if (err)
1521 return err;
1523 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1525 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1526 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1527 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1528 enc_opts_policy, extack);
1529 if (err)
1530 return err;
1532 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1533 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1534 if (!nla_ok(nla_opt_msk, msk_depth)) {
1535 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1536 return -EINVAL;
1540 nla_for_each_attr(nla_opt_key, nla_enc_key,
1541 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1542 switch (nla_type(nla_opt_key)) {
1543 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1544 if (key->enc_opts.dst_opt_type &&
1545 key->enc_opts.dst_opt_type !=
1546 IP_TUNNEL_GENEVE_OPT_BIT) {
1547 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1548 return -EINVAL;
1550 option_len = 0;
1551 key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
1552 option_len = fl_set_geneve_opt(nla_opt_key, key,
1553 key_depth, option_len,
1554 extack);
1555 if (option_len < 0)
1556 return option_len;
1558 key->enc_opts.len += option_len;
1559 /* At the same time we need to parse through the mask
1560 * in order to verify exact and mask attribute lengths.
1561 */
1562 mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
1563 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1564 msk_depth, option_len,
1565 extack);
1566 if (option_len < 0)
1567 return option_len;
1569 mask->enc_opts.len += option_len;
1570 if (key->enc_opts.len != mask->enc_opts.len) {
1571 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1572 return -EINVAL;
1574 break;
1575 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1576 if (key->enc_opts.dst_opt_type) {
1577 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1578 return -EINVAL;
1580 option_len = 0;
1581 key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
1582 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1583 key_depth, option_len,
1584 extack);
1585 if (option_len < 0)
1586 return option_len;
1588 key->enc_opts.len += option_len;
1589 /* At the same time we need to parse through the mask
1590 * in order to verify exact and mask attribute lengths.
1591 */
1592 mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
1593 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1594 msk_depth, option_len,
1595 extack);
1596 if (option_len < 0)
1597 return option_len;
1599 mask->enc_opts.len += option_len;
1600 if (key->enc_opts.len != mask->enc_opts.len) {
1601 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1602 return -EINVAL;
1604 break;
1605 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1606 if (key->enc_opts.dst_opt_type) {
1607 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1608 return -EINVAL;
1610 option_len = 0;
1611 key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
1612 option_len = fl_set_erspan_opt(nla_opt_key, key,
1613 key_depth, option_len,
1614 extack);
1615 if (option_len < 0)
1616 return option_len;
1618 key->enc_opts.len += option_len;
1619 /* At the same time we need to parse through the mask
1620 * in order to verify exact and mask attribute lengths.
1621 */
1622 mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
1623 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1624 msk_depth, option_len,
1625 extack);
1626 if (option_len < 0)
1627 return option_len;
1629 mask->enc_opts.len += option_len;
1630 if (key->enc_opts.len != mask->enc_opts.len) {
1631 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1632 return -EINVAL;
1634 break;
1635 case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1636 if (key->enc_opts.dst_opt_type) {
1637 NL_SET_ERR_MSG_MOD(extack,
1638 "Duplicate type for gtp options");
1639 return -EINVAL;
1641 option_len = 0;
1642 key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
1643 option_len = fl_set_gtp_opt(nla_opt_key, key,
1644 key_depth, option_len,
1645 extack);
1646 if (option_len < 0)
1647 return option_len;
1649 key->enc_opts.len += option_len;
1650 /* At the same time we need to parse through the mask
1651 * in order to verify exact and mask attribute lengths.
1652 */
1653 mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
1654 option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1655 msk_depth, option_len,
1656 extack);
1657 if (option_len < 0)
1658 return option_len;
1660 mask->enc_opts.len += option_len;
1661 if (key->enc_opts.len != mask->enc_opts.len) {
1662 NL_SET_ERR_MSG_MOD(extack,
1663 "Key and mask miss aligned");
1664 return -EINVAL;
1666 break;
1667 case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
1668 if (key->enc_opts.dst_opt_type) {
1669 NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
1670 return -EINVAL;
1672 option_len = 0;
1673 key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
1674 option_len = fl_set_pfcp_opt(nla_opt_key, key,
1675 key_depth, option_len,
1676 extack);
1677 if (option_len < 0)
1678 return option_len;
1680 key->enc_opts.len += option_len;
1681 /* At the same time we need to parse through the mask
1682 * in order to verify exact and mask attribute lengths.
1683 */
1684 mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
1685 option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
1686 msk_depth, option_len,
1687 extack);
1688 if (option_len < 0)
1689 return option_len;
1691 mask->enc_opts.len += option_len;
1692 if (key->enc_opts.len != mask->enc_opts.len) {
1693 NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned");
1694 return -EINVAL;
1696 break;
1697 default:
1698 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1699 return -EINVAL;
1702 if (!msk_depth)
1703 continue;
1705 if (!nla_ok(nla_opt_msk, msk_depth)) {
1706 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1707 return -EINVAL;
1709 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1712 return 0;
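/* Summary of the loop above (illustrative note): key and mask option lists
 * are walked in lockstep; each fl_set_*_opt() helper is called once with the
 * key attribute and once with the matching mask attribute, and after every
 * option key->enc_opts.len must equal mask->enc_opts.len or the request is
 * rejected as misaligned.
 */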
1715 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1716 struct netlink_ext_ack *extack)
1718 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1719 NL_SET_ERR_MSG_ATTR(extack, tb,
1720 "no trk, so no other flag can be set");
1721 return -EINVAL;
1724 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1725 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1726 NL_SET_ERR_MSG_ATTR(extack, tb,
1727 "new and est are mutually exclusive");
1728 return -EINVAL;
1731 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1732 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1733 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1734 NL_SET_ERR_MSG_ATTR(extack, tb,
1735 "when inv is set, only trk may be set");
1736 return -EINVAL;
1739 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1740 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1741 NL_SET_ERR_MSG_ATTR(extack, tb,
1742 "new and rpl are mutually exclusive");
1743 return -EINVAL;
1746 return 0;
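/* Illustrative outcomes of the rules above: "+trk+est" and "+trk+rel+rpl"
 * are accepted, while "+est" without "+trk", "+trk+new+est", "+trk+new+rpl"
 * and "+trk+inv+est" are all rejected with -EINVAL.
 */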
1749 static int fl_set_key_ct(struct nlattr **tb,
1750 struct flow_dissector_key_ct *key,
1751 struct flow_dissector_key_ct *mask,
1752 struct netlink_ext_ack *extack)
1754 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1755 int err;
1757 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1758 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1759 return -EOPNOTSUPP;
1761 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1762 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1763 sizeof(key->ct_state));
1765 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1766 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1767 extack);
1768 if (err)
1769 return err;
1772 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1773 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1774 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1775 return -EOPNOTSUPP;
1777 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1778 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1779 sizeof(key->ct_zone));
1781 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1782 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1783 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1784 return -EOPNOTSUPP;
1786 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1787 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1788 sizeof(key->ct_mark));
1790 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1791 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1792 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1793 return -EOPNOTSUPP;
1795 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1796 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1797 sizeof(key->ct_labels));
1800 return 0;
1803 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1804 struct fl_flow_key *key, struct fl_flow_key *mask,
1805 int vthresh)
1807 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1809 if (!tb) {
1810 *ethertype = 0;
1811 return good_num_of_vlans;
1814 *ethertype = nla_get_be16(tb);
1815 if (good_num_of_vlans || eth_type_vlan(*ethertype))
1816 return true;
1818 key->basic.n_proto = *ethertype;
1819 mask->basic.n_proto = cpu_to_be16(~0);
1820 return false;
1823 static void fl_set_key_cfm_md_level(struct nlattr **tb,
1824 struct fl_flow_key *key,
1825 struct fl_flow_key *mask,
1826 struct netlink_ext_ack *extack)
1828 u8 level;
1830 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
1831 return;
1833 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
1834 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
1835 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
1838 static void fl_set_key_cfm_opcode(struct nlattr **tb,
1839 struct fl_flow_key *key,
1840 struct fl_flow_key *mask,
1841 struct netlink_ext_ack *extack)
1843 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
1844 &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
1845 sizeof(key->cfm.opcode));
1848 static int fl_set_key_cfm(struct nlattr **tb,
1849 struct fl_flow_key *key,
1850 struct fl_flow_key *mask,
1851 struct netlink_ext_ack *extack)
1853 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
1854 int err;
1856 if (!tb[TCA_FLOWER_KEY_CFM])
1857 return 0;
1859 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
1860 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
1861 if (err < 0)
1862 return err;
1864 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
1865 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
1867 return 0;
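/* Parse all TCA_FLOWER_KEY_* attributes from @tb into @key and @mask.
 * Attributes for upper layers are only interpreted once the corresponding
 * lower-layer protocol (n_proto, ip_proto) has been determined.
 */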
1870 static int fl_set_key(struct net *net, struct nlattr *tca_opts,
1871 struct nlattr **tb, struct fl_flow_key *key,
1872 struct fl_flow_key *mask, struct netlink_ext_ack *extack)
1874 __be16 ethertype;
1875 int ret = 0;
1877 if (tb[TCA_FLOWER_INDEV]) {
1878 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1879 if (err < 0)
1880 return err;
1881 key->meta.ingress_ifindex = err;
1882 mask->meta.ingress_ifindex = 0xffffffff;
1885 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
1886 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
1887 sizeof(key->meta.l2_miss));
1889 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1890 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1891 sizeof(key->eth.dst));
1892 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1893 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1894 sizeof(key->eth.src));
1895 fl_set_key_val(tb, &key->num_of_vlans,
1896 TCA_FLOWER_KEY_NUM_OF_VLANS,
1897 &mask->num_of_vlans,
1898 TCA_FLOWER_UNSPEC,
1899 sizeof(key->num_of_vlans));
1901 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1902 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1903 TCA_FLOWER_KEY_VLAN_PRIO,
1904 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1905 &key->vlan, &mask->vlan);
1907 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1908 &ethertype, key, mask, 1)) {
1909 fl_set_key_vlan(tb, ethertype,
1910 TCA_FLOWER_KEY_CVLAN_ID,
1911 TCA_FLOWER_KEY_CVLAN_PRIO,
1912 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1913 &key->cvlan, &mask->cvlan);
1914 fl_set_key_val(tb, &key->basic.n_proto,
1915 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1916 &mask->basic.n_proto,
1917 TCA_FLOWER_UNSPEC,
1918 sizeof(key->basic.n_proto));
1922 if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1923 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1925 if (key->basic.n_proto == htons(ETH_P_IP) ||
1926 key->basic.n_proto == htons(ETH_P_IPV6)) {
1927 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1928 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1929 sizeof(key->basic.ip_proto));
1930 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1933 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1934 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1935 mask->control.addr_type = ~0;
1936 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1937 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1938 sizeof(key->ipv4.src));
1939 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1940 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1941 sizeof(key->ipv4.dst));
1942 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1943 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1944 mask->control.addr_type = ~0;
1945 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1946 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1947 sizeof(key->ipv6.src));
1948 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1949 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1950 sizeof(key->ipv6.dst));
1953 if (key->basic.ip_proto == IPPROTO_TCP) {
1954 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1955 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1956 sizeof(key->tp.src));
1957 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1958 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1959 sizeof(key->tp.dst));
1960 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1961 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1962 sizeof(key->tcp.flags));
1963 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1964 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1965 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1966 sizeof(key->tp.src));
1967 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1968 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1969 sizeof(key->tp.dst));
1970 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1971 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1972 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1973 sizeof(key->tp.src));
1974 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1975 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1976 sizeof(key->tp.dst));
1977 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1978 key->basic.ip_proto == IPPROTO_ICMP) {
1979 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1980 &mask->icmp.type,
1981 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1982 sizeof(key->icmp.type));
1983 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1984 &mask->icmp.code,
1985 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1986 sizeof(key->icmp.code));
1987 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1988 key->basic.ip_proto == IPPROTO_ICMPV6) {
1989 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1990 &mask->icmp.type,
1991 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1992 sizeof(key->icmp.type));
1993 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1994 &mask->icmp.code,
1995 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1996 sizeof(key->icmp.code));
1997 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1998 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1999 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
2000 if (ret)
2001 return ret;
2002 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
2003 key->basic.n_proto == htons(ETH_P_RARP)) {
2004 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
2005 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
2006 sizeof(key->arp.sip));
2007 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
2008 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
2009 sizeof(key->arp.tip));
2010 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
2011 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
2012 sizeof(key->arp.op));
2013 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2014 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2015 sizeof(key->arp.sha));
2016 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2017 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2018 sizeof(key->arp.tha));
2019 } else if (key->basic.ip_proto == IPPROTO_L2TP) {
2020 fl_set_key_val(tb, &key->l2tpv3.session_id,
2021 TCA_FLOWER_KEY_L2TPV3_SID,
2022 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
2023 sizeof(key->l2tpv3.session_id));
2024 } else if (key->basic.n_proto == htons(ETH_P_CFM)) {
2025 ret = fl_set_key_cfm(tb, key, mask, extack);
2026 if (ret)
2027 return ret;
2030 if (key->basic.ip_proto == IPPROTO_TCP ||
2031 key->basic.ip_proto == IPPROTO_UDP ||
2032 key->basic.ip_proto == IPPROTO_SCTP) {
2033 ret = fl_set_key_port_range(tb, key, mask, extack);
2034 if (ret)
2035 return ret;
2038 if (tb[TCA_FLOWER_KEY_SPI]) {
2039 ret = fl_set_key_spi(tb, key, mask, extack);
2040 if (ret)
2041 return ret;
2044 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
2045 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
2046 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2047 mask->enc_control.addr_type = ~0;
2048 fl_set_key_val(tb, &key->enc_ipv4.src,
2049 TCA_FLOWER_KEY_ENC_IPV4_SRC,
2050 &mask->enc_ipv4.src,
2051 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2052 sizeof(key->enc_ipv4.src));
2053 fl_set_key_val(tb, &key->enc_ipv4.dst,
2054 TCA_FLOWER_KEY_ENC_IPV4_DST,
2055 &mask->enc_ipv4.dst,
2056 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2057 sizeof(key->enc_ipv4.dst));
2060 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
2061 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
2062 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2063 mask->enc_control.addr_type = ~0;
2064 fl_set_key_val(tb, &key->enc_ipv6.src,
2065 TCA_FLOWER_KEY_ENC_IPV6_SRC,
2066 &mask->enc_ipv6.src,
2067 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2068 sizeof(key->enc_ipv6.src));
2069 fl_set_key_val(tb, &key->enc_ipv6.dst,
2070 TCA_FLOWER_KEY_ENC_IPV6_DST,
2071 &mask->enc_ipv6.dst,
2072 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2073 sizeof(key->enc_ipv6.dst));
2076 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
2077 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
2078 sizeof(key->enc_key_id.keyid));
2080 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2081 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2082 sizeof(key->enc_tp.src));
2084 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2085 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2086 sizeof(key->enc_tp.dst));
2088 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
2090 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
2091 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
2092 sizeof(key->hash.hash));
2094 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
2095 ret = fl_set_enc_opt(tb, key, mask, extack);
2096 if (ret)
2097 return ret;
2100 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
2101 if (ret)
2102 return ret;
2104 if (tb[TCA_FLOWER_KEY_FLAGS]) {
2105 ret = fl_set_key_flags(tca_opts, tb, false,
2106 &key->control.flags,
2107 &mask->control.flags, extack);
2108 if (ret)
2109 return ret;
2112 if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
2113 ret = fl_set_key_flags(tca_opts, tb, true,
2114 &key->enc_control.flags,
2115 &mask->enc_control.flags, extack);
2117 return ret;
2120 static void fl_mask_copy(struct fl_flow_mask *dst,
2121 struct fl_flow_mask *src)
2123 const void *psrc = fl_key_get_start(&src->key, src);
2124 void *pdst = fl_key_get_start(&dst->key, src);
2126 memcpy(pdst, psrc, fl_mask_range(src));
2127 dst->range = src->range;
2130 static const struct rhashtable_params fl_ht_params = {
2131 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
2132 .head_offset = offsetof(struct cls_fl_filter, ht_node),
2133 .automatic_shrinking = true,
2136 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
2138 mask->filter_ht_params = fl_ht_params;
2139 mask->filter_ht_params.key_len = fl_mask_range(mask);
2140 mask->filter_ht_params.key_offset += mask->range.start;
2142 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
2145 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
2146 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
2148 #define FL_KEY_IS_MASKED(mask, member) \
2149 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
2150 0, FL_KEY_MEMBER_SIZE(member))
2152 #define FL_KEY_SET(keys, cnt, id, member) \
2153 do { \
2154 keys[cnt].key_id = id; \
2155 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
2156 cnt++; \
2157 } while (0)
2159 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
2160 do { \
2161 if (FL_KEY_IS_MASKED(mask, member)) \
2162 FL_KEY_SET(keys, cnt, id, member); \
2163 } while (0)
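/* Build the per-mask flow dissector: CONTROL and BASIC are always included,
 * every other key only when at least one bit of its mask is set
 * (ENC_CONTROL is pulled in whenever the tunnel control or address keys
 * are masked).
 */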
2165 static void fl_init_dissector(struct flow_dissector *dissector,
2166 struct fl_flow_key *mask)
2168 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
2169 size_t cnt = 0;
2171 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2172 FLOW_DISSECTOR_KEY_META, meta);
2173 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
2174 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
2175 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2176 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
2177 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2178 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2179 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2180 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2181 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2182 FLOW_DISSECTOR_KEY_PORTS, tp);
2183 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2184 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2185 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2186 FLOW_DISSECTOR_KEY_IP, ip);
2187 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2188 FLOW_DISSECTOR_KEY_TCP, tcp);
2189 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2190 FLOW_DISSECTOR_KEY_ICMP, icmp);
2191 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2192 FLOW_DISSECTOR_KEY_ARP, arp);
2193 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2194 FLOW_DISSECTOR_KEY_MPLS, mpls);
2195 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2196 FLOW_DISSECTOR_KEY_VLAN, vlan);
2197 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2198 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
2199 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2200 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
2201 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2202 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
2203 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2204 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
2205 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
2206 FL_KEY_IS_MASKED(mask, enc_ipv6) ||
2207 FL_KEY_IS_MASKED(mask, enc_control))
2208 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2209 enc_control);
2210 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2211 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
2212 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2213 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
2214 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2215 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
2216 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2217 FLOW_DISSECTOR_KEY_CT, ct);
2218 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2219 FLOW_DISSECTOR_KEY_HASH, hash);
2220 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2221 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
2222 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2223 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
2224 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2225 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
2226 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2227 FLOW_DISSECTOR_KEY_IPSEC, ipsec);
2228 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2229 FLOW_DISSECTOR_KEY_CFM, cfm);
2231 skb_flow_dissector_init(dissector, keys, cnt);
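/* Turn the temporary mask node inserted by fl_check_assign_mask() into a
 * fully initialised, refcounted mask: copy the key, set up the filter hash
 * table and dissector, then publish it in head->ht and head->masks.
 */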
2234 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2235 struct fl_flow_mask *mask)
2237 struct fl_flow_mask *newmask;
2238 int err;
2240 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2241 if (!newmask)
2242 return ERR_PTR(-ENOMEM);
2244 fl_mask_copy(newmask, mask);
2246 if ((newmask->key.tp_range.tp_min.dst &&
2247 newmask->key.tp_range.tp_max.dst) ||
2248 (newmask->key.tp_range.tp_min.src &&
2249 newmask->key.tp_range.tp_max.src))
2250 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2252 err = fl_init_mask_hashtable(newmask);
2253 if (err)
2254 goto errout_free;
2256 fl_init_dissector(&newmask->dissector, &newmask->key);
2258 INIT_LIST_HEAD_RCU(&newmask->filters);
2260 refcount_set(&newmask->refcnt, 1);
2261 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2262 &newmask->ht_node, mask_ht_params);
2263 if (err)
2264 goto errout_destroy;
2266 spin_lock(&head->masks_lock);
2267 list_add_tail_rcu(&newmask->list, &head->masks);
2268 spin_unlock(&head->masks_lock);
2270 return newmask;
2272 errout_destroy:
2273 rhashtable_destroy(&newmask->ht);
2274 errout_free:
2275 kfree(newmask);
2277 return ERR_PTR(err);
2280 static int fl_check_assign_mask(struct cls_fl_head *head,
2281 struct cls_fl_filter *fnew,
2282 struct cls_fl_filter *fold,
2283 struct fl_flow_mask *mask)
2285 struct fl_flow_mask *newmask;
2286 int ret = 0;
2288 rcu_read_lock();
2290 /* Insert mask as a temporary node to prevent concurrent creation of a
2291 * mask with the same key. Any concurrent lookup with the same key will
2292 * return -EAGAIN because the mask's refcnt is zero.
2293 */
2294 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2295 &mask->ht_node,
2296 mask_ht_params);
2297 if (!fnew->mask) {
2298 rcu_read_unlock();
2300 if (fold) {
2301 ret = -EINVAL;
2302 goto errout_cleanup;
2305 newmask = fl_create_new_mask(head, mask);
2306 if (IS_ERR(newmask)) {
2307 ret = PTR_ERR(newmask);
2308 goto errout_cleanup;
2311 fnew->mask = newmask;
2312 return 0;
2313 } else if (IS_ERR(fnew->mask)) {
2314 ret = PTR_ERR(fnew->mask);
2315 } else if (fold && fold->mask != fnew->mask) {
2316 ret = -EINVAL;
2317 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2318 /* Mask was deleted concurrently, try again */
2319 ret = -EAGAIN;
2321 rcu_read_unlock();
2322 return ret;
2324 errout_cleanup:
2325 rhashtable_remove_fast(&head->ht, &mask->ht_node,
2326 mask_ht_params);
2327 return ret;
2330 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
2332 return mask->meta.l2_miss;
2335 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2336 struct cls_fl_filter *fold,
2337 bool *in_ht)
2339 struct fl_flow_mask *mask = fnew->mask;
2340 int err;
2342 err = rhashtable_lookup_insert_fast(&mask->ht,
2343 &fnew->ht_node,
2344 mask->filter_ht_params);
2345 if (err) {
2346 *in_ht = false;
2347 /* It is okay if a filter with the same key already exists when
2348 * overwriting.
2349 */
2350 return fold && err == -EEXIST ? 0 : err;
2353 *in_ht = true;
2354 return 0;
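/* Create a new filter or, when *arg is non-NULL, replace the existing one.
 * flower supports rtnl-unlocked operation, so concurrent modifications are
 * resolved with tp->lock, RCU and -EAGAIN retries from cls_api.
 */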
2357 static int fl_change(struct net *net, struct sk_buff *in_skb,
2358 struct tcf_proto *tp, unsigned long base,
2359 u32 handle, struct nlattr **tca,
2360 void **arg, u32 flags,
2361 struct netlink_ext_ack *extack)
2363 struct cls_fl_head *head = fl_head_dereference(tp);
2364 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2365 struct nlattr *tca_opts = tca[TCA_OPTIONS];
2366 struct cls_fl_filter *fold = *arg;
2367 bool bound_to_filter = false;
2368 struct cls_fl_filter *fnew;
2369 struct fl_flow_mask *mask;
2370 struct nlattr **tb;
2371 bool in_ht;
2372 int err;
2374 if (!tca_opts) {
2375 err = -EINVAL;
2376 goto errout_fold;
2379 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2380 if (!mask) {
2381 err = -ENOBUFS;
2382 goto errout_fold;
2385 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2386 if (!tb) {
2387 err = -ENOBUFS;
2388 goto errout_mask_alloc;
2391 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2392 tca_opts, fl_policy, NULL);
2393 if (err < 0)
2394 goto errout_tb;
2396 if (fold && handle && fold->handle != handle) {
2397 err = -EINVAL;
2398 goto errout_tb;
2401 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2402 if (!fnew) {
2403 err = -ENOBUFS;
2404 goto errout_tb;
2406 INIT_LIST_HEAD(&fnew->hw_list);
2407 refcount_set(&fnew->refcnt, 1);
2409 if (tb[TCA_FLOWER_FLAGS]) {
2410 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2412 if (!tc_flags_valid(fnew->flags)) {
2413 kfree(fnew);
2414 err = -EINVAL;
2415 goto errout_tb;
2419 if (!fold) {
2420 spin_lock(&tp->lock);
2421 if (!handle) {
2422 handle = 1;
2423 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2424 INT_MAX, GFP_ATOMIC);
2425 } else {
2426 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2427 handle, GFP_ATOMIC);
2429 /* A filter with the specified handle was concurrently
2430 * inserted after the initial check in cls_api. This is not
2431 * necessarily an error if NLM_F_EXCL is not set in the
2432 * message flags. Returning EAGAIN will cause cls_api to
2433 * try to update the concurrently inserted rule.
2434 */
2435 if (err == -ENOSPC)
2436 err = -EAGAIN;
2438 spin_unlock(&tp->lock);
2440 if (err) {
2441 kfree(fnew);
2442 goto errout_tb;
2445 fnew->handle = handle;
2447 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2448 !tc_skip_hw(fnew->flags));
2449 if (err < 0)
2450 goto errout_idr;
2452 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
2453 &fnew->exts, flags, fnew->flags,
2454 extack);
2455 if (err < 0)
2456 goto errout_idr;
2458 if (tb[TCA_FLOWER_CLASSID]) {
2459 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2460 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2461 rtnl_lock();
2462 tcf_bind_filter(tp, &fnew->res, base);
2463 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2464 rtnl_unlock();
2465 bound_to_filter = true;
2468 err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
2469 if (err)
2470 goto unbind_filter;
2472 fl_mask_update_range(mask);
2473 fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
2475 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
2476 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2477 err = -EINVAL;
2478 goto unbind_filter;
2481 /* Enable tc skb extension if filter matches on data extracted from
2482 * this extension.
2483 */
2484 if (fl_needs_tc_skb_ext(&mask->key)) {
2485 fnew->needs_tc_skb_ext = 1;
2486 tc_skb_ext_tc_enable();
2489 err = fl_check_assign_mask(head, fnew, fold, mask);
2490 if (err)
2491 goto unbind_filter;
2493 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2494 if (err)
2495 goto errout_mask;
2497 if (!tc_skip_hw(fnew->flags)) {
2498 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2499 if (err)
2500 goto errout_ht;
2503 if (!tc_in_hw(fnew->flags))
2504 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2506 tcf_proto_update_usesw(tp, fnew->flags);
2508 spin_lock(&tp->lock);
2510 /* tp was deleted concurrently. -EAGAIN will cause the caller to look up
2511 * the proto again or create a new one, if necessary.
2512 */
2513 if (tp->deleting) {
2514 err = -EAGAIN;
2515 goto errout_hw;
2518 if (fold) {
2519 /* Fold filter was deleted concurrently. Retry lookup. */
2520 if (fold->deleted) {
2521 err = -EAGAIN;
2522 goto errout_hw;
2525 fnew->handle = handle;
2527 if (!in_ht) {
2528 struct rhashtable_params params =
2529 fnew->mask->filter_ht_params;
2531 err = rhashtable_insert_fast(&fnew->mask->ht,
2532 &fnew->ht_node,
2533 params);
2534 if (err)
2535 goto errout_hw;
2536 in_ht = true;
2539 refcount_inc(&fnew->refcnt);
2540 rhashtable_remove_fast(&fold->mask->ht,
2541 &fold->ht_node,
2542 fold->mask->filter_ht_params);
2543 idr_replace(&head->handle_idr, fnew, fnew->handle);
2544 list_replace_rcu(&fold->list, &fnew->list);
2545 fold->deleted = true;
2547 spin_unlock(&tp->lock);
2549 fl_mask_put(head, fold->mask);
2550 if (!tc_skip_hw(fold->flags))
2551 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2552 tcf_unbind_filter(tp, &fold->res);
2553 /* Caller holds reference to fold, so refcnt is always > 0
2554 * after this.
2555 */
2556 refcount_dec(&fold->refcnt);
2557 __fl_put(fold);
2558 } else {
2559 idr_replace(&head->handle_idr, fnew, fnew->handle);
2561 refcount_inc(&fnew->refcnt);
2562 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2563 spin_unlock(&tp->lock);
2566 *arg = fnew;
2568 kfree(tb);
2569 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2570 return 0;
2572 errout_ht:
2573 spin_lock(&tp->lock);
2574 errout_hw:
2575 fnew->deleted = true;
2576 spin_unlock(&tp->lock);
2577 if (!tc_skip_hw(fnew->flags))
2578 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2579 if (in_ht)
2580 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2581 fnew->mask->filter_ht_params);
2582 errout_mask:
2583 fl_mask_put(head, fnew->mask);
2585 unbind_filter:
2586 if (bound_to_filter) {
2587 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2588 rtnl_lock();
2589 tcf_unbind_filter(tp, &fnew->res);
2590 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2591 rtnl_unlock();
2594 errout_idr:
2595 if (!fold) {
2596 spin_lock(&tp->lock);
2597 idr_remove(&head->handle_idr, fnew->handle);
2598 spin_unlock(&tp->lock);
2600 __fl_put(fnew);
2601 errout_tb:
2602 kfree(tb);
2603 errout_mask_alloc:
2604 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2605 errout_fold:
2606 if (fold)
2607 __fl_put(fold);
2608 return err;
2611 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2612 bool rtnl_held, struct netlink_ext_ack *extack)
2614 struct cls_fl_head *head = fl_head_dereference(tp);
2615 struct cls_fl_filter *f = arg;
2616 bool last_on_mask;
2617 int err = 0;
2619 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2620 *last = list_empty(&head->masks);
2621 __fl_put(f);
2623 return err;
2626 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2627 bool rtnl_held)
2629 struct cls_fl_head *head = fl_head_dereference(tp);
2630 unsigned long id = arg->cookie, tmp;
2631 struct cls_fl_filter *f;
2633 arg->count = arg->skip;
2635 rcu_read_lock();
2636 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2637 /* don't return filters that are being deleted */
2638 if (!f || !refcount_inc_not_zero(&f->refcnt))
2639 continue;
2640 rcu_read_unlock();
2642 if (arg->fn(tp, f, arg) < 0) {
2643 __fl_put(f);
2644 arg->stop = 1;
2645 rcu_read_lock();
2646 break;
2648 __fl_put(f);
2649 arg->count++;
2650 rcu_read_lock();
2652 rcu_read_unlock();
2653 arg->cookie = id;
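/* Return the hw_filters entry following @f (or the first one when @f is
 * NULL) with its refcount taken; filters already marked deleted are
 * skipped when re-adding rules to hardware.
 */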
2656 static struct cls_fl_filter *
2657 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2659 struct cls_fl_head *head = fl_head_dereference(tp);
2661 spin_lock(&tp->lock);
2662 if (list_empty(&head->hw_filters)) {
2663 spin_unlock(&tp->lock);
2664 return NULL;
2667 if (!f)
2668 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2669 hw_list);
2670 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2671 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2672 spin_unlock(&tp->lock);
2673 return f;
2677 spin_unlock(&tp->lock);
2678 return NULL;
2681 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2682 void *cb_priv, struct netlink_ext_ack *extack)
2684 struct tcf_block *block = tp->chain->block;
2685 struct flow_cls_offload cls_flower = {};
2686 struct cls_fl_filter *f = NULL;
2687 int err;
2689 /* hw_filters list can only be changed by hw offload functions after
2690 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2691 * iterating it.
2692 */
2693 ASSERT_RTNL();
2695 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2696 cls_flower.rule =
2697 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2698 if (!cls_flower.rule) {
2699 __fl_put(f);
2700 return -ENOMEM;
2703 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2704 extack);
2705 cls_flower.command = add ?
2706 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2707 cls_flower.cookie = (unsigned long)f;
2708 cls_flower.rule->match.dissector = &f->mask->dissector;
2709 cls_flower.rule->match.mask = &f->mask->key;
2710 cls_flower.rule->match.key = &f->mkey;
2712 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2713 cls_flower.common.extack);
2714 if (err) {
2715 kfree(cls_flower.rule);
2716 if (tc_skip_sw(f->flags)) {
2717 __fl_put(f);
2718 return err;
2720 goto next_flow;
2723 cls_flower.classid = f->res.classid;
2725 err = tc_setup_cb_reoffload(block, tp, add, cb,
2726 TC_SETUP_CLSFLOWER, &cls_flower,
2727 cb_priv, &f->flags,
2728 &f->in_hw_count);
2729 tc_cleanup_offload_action(&cls_flower.rule->action);
2730 kfree(cls_flower.rule);
2732 if (err) {
2733 __fl_put(f);
2734 return err;
2736 next_flow:
2737 __fl_put(f);
2740 return 0;
2743 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2745 struct flow_cls_offload *cls_flower = type_data;
2746 struct cls_fl_filter *f =
2747 (struct cls_fl_filter *) cls_flower->cookie;
2748 struct cls_fl_head *head = fl_head_dereference(tp);
2750 spin_lock(&tp->lock);
2751 list_add(&f->hw_list, &head->hw_filters);
2752 spin_unlock(&tp->lock);
2755 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2757 struct flow_cls_offload *cls_flower = type_data;
2758 struct cls_fl_filter *f =
2759 (struct cls_fl_filter *) cls_flower->cookie;
2761 spin_lock(&tp->lock);
2762 if (!list_empty(&f->hw_list))
2763 list_del_init(&f->hw_list);
2764 spin_unlock(&tp->lock);
2767 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2768 struct fl_flow_tmplt *tmplt)
2770 struct flow_cls_offload cls_flower = {};
2771 struct tcf_block *block = chain->block;
2773 cls_flower.rule = flow_rule_alloc(0);
2774 if (!cls_flower.rule)
2775 return -ENOMEM;
2777 cls_flower.common.chain_index = chain->index;
2778 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2779 cls_flower.cookie = (unsigned long) tmplt;
2780 cls_flower.rule->match.dissector = &tmplt->dissector;
2781 cls_flower.rule->match.mask = &tmplt->mask;
2782 cls_flower.rule->match.key = &tmplt->dummy_key;
2784 /* We don't care if any of the drivers fails to handle this
2785 * call. It serves just as a hint to them.
2786 */
2787 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2788 kfree(cls_flower.rule);
2790 return 0;
2793 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2794 struct fl_flow_tmplt *tmplt)
2796 struct flow_cls_offload cls_flower = {};
2797 struct tcf_block *block = chain->block;
2799 cls_flower.common.chain_index = chain->index;
2800 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2801 cls_flower.cookie = (unsigned long) tmplt;
2803 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2806 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2807 struct nlattr **tca,
2808 struct netlink_ext_ack *extack)
2810 struct nlattr *tca_opts = tca[TCA_OPTIONS];
2811 struct fl_flow_tmplt *tmplt;
2812 struct nlattr **tb;
2813 int err;
2815 if (!tca_opts)
2816 return ERR_PTR(-EINVAL);
2818 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2819 if (!tb)
2820 return ERR_PTR(-ENOBUFS);
2821 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2822 tca_opts, fl_policy, NULL);
2823 if (err)
2824 goto errout_tb;
2826 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2827 if (!tmplt) {
2828 err = -ENOMEM;
2829 goto errout_tb;
2831 tmplt->chain = chain;
2832 err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
2833 &tmplt->mask, extack);
2834 if (err)
2835 goto errout_tmplt;
2837 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2839 err = fl_hw_create_tmplt(chain, tmplt);
2840 if (err)
2841 goto errout_tmplt;
2843 kfree(tb);
2844 return tmplt;
2846 errout_tmplt:
2847 kfree(tmplt);
2848 errout_tb:
2849 kfree(tb);
2850 return ERR_PTR(err);
2853 static void fl_tmplt_destroy(void *tmplt_priv)
2855 struct fl_flow_tmplt *tmplt = tmplt_priv;
2857 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2858 kfree(tmplt);
2861 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
2862 flow_setup_cb_t *cb, void *cb_priv)
2864 struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
2865 struct flow_cls_offload cls_flower = {};
2867 cls_flower.rule = flow_rule_alloc(0);
2868 if (!cls_flower.rule)
2869 return;
2871 cls_flower.common.chain_index = chain->index;
2872 cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
2873 FLOW_CLS_TMPLT_DESTROY;
2874 cls_flower.cookie = (unsigned long) tmplt;
2875 cls_flower.rule->match.dissector = &tmplt->dissector;
2876 cls_flower.rule->match.mask = &tmplt->mask;
2877 cls_flower.rule->match.key = &tmplt->dummy_key;
2879 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
2880 kfree(cls_flower.rule);
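/* Emit a key/mask pair into the dump. Nothing is emitted for an all-zero
 * mask, and the mask attribute itself is omitted when mask_type is
 * TCA_FLOWER_UNSPEC.
 */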
2883 static int fl_dump_key_val(struct sk_buff *skb,
2884 void *val, int val_type,
2885 void *mask, int mask_type, int len)
2887 int err;
2889 if (!memchr_inv(mask, 0, len))
2890 return 0;
2891 err = nla_put(skb, val_type, len, val);
2892 if (err)
2893 return err;
2894 if (mask_type != TCA_FLOWER_UNSPEC) {
2895 err = nla_put(skb, mask_type, len, mask);
2896 if (err)
2897 return err;
2899 return 0;
2902 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2903 struct fl_flow_key *mask)
2905 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2906 TCA_FLOWER_KEY_PORT_DST_MIN,
2907 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2908 sizeof(key->tp_range.tp_min.dst)) ||
2909 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2910 TCA_FLOWER_KEY_PORT_DST_MAX,
2911 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2912 sizeof(key->tp_range.tp_max.dst)) ||
2913 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2914 TCA_FLOWER_KEY_PORT_SRC_MIN,
2915 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2916 sizeof(key->tp_range.tp_min.src)) ||
2917 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2918 TCA_FLOWER_KEY_PORT_SRC_MAX,
2919 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2920 sizeof(key->tp_range.tp_max.src)))
2921 return -1;
2923 return 0;
2926 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2927 struct flow_dissector_key_mpls *mpls_key,
2928 struct flow_dissector_key_mpls *mpls_mask,
2929 u8 lse_index)
2931 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2932 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2933 int err;
2935 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2936 lse_index + 1);
2937 if (err)
2938 return err;
2940 if (lse_mask->mpls_ttl) {
2941 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2942 lse_key->mpls_ttl);
2943 if (err)
2944 return err;
2946 if (lse_mask->mpls_bos) {
2947 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2948 lse_key->mpls_bos);
2949 if (err)
2950 return err;
2952 if (lse_mask->mpls_tc) {
2953 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2954 lse_key->mpls_tc);
2955 if (err)
2956 return err;
2958 if (lse_mask->mpls_label) {
2959 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2960 lse_key->mpls_label);
2961 if (err)
2962 return err;
2965 return 0;
2968 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2969 struct flow_dissector_key_mpls *mpls_key,
2970 struct flow_dissector_key_mpls *mpls_mask)
2972 struct nlattr *opts;
2973 struct nlattr *lse;
2974 u8 lse_index;
2975 int err;
2977 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2978 if (!opts)
2979 return -EMSGSIZE;
2981 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2982 if (!(mpls_mask->used_lses & 1 << lse_index))
2983 continue;
2985 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2986 if (!lse) {
2987 err = -EMSGSIZE;
2988 goto err_opts;
2991 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2992 lse_index);
2993 if (err)
2994 goto err_opts_lse;
2995 nla_nest_end(skb, lse);
2997 nla_nest_end(skb, opts);
2999 return 0;
3001 err_opts_lse:
3002 nla_nest_cancel(skb, lse);
3003 err_opts:
3004 nla_nest_cancel(skb, opts);
3006 return err;
3009 static int fl_dump_key_mpls(struct sk_buff *skb,
3010 struct flow_dissector_key_mpls *mpls_key,
3011 struct flow_dissector_key_mpls *mpls_mask)
3013 struct flow_dissector_mpls_lse *lse_mask;
3014 struct flow_dissector_mpls_lse *lse_key;
3015 int err;
3017 if (!mpls_mask->used_lses)
3018 return 0;
3020 lse_mask = &mpls_mask->ls[0];
3021 lse_key = &mpls_key->ls[0];
3023 /* For backward compatibility, don't use the MPLS nested attributes if
3024 * the rule can be expressed using the old attributes.
3025 */
3026 if (mpls_mask->used_lses & ~1 ||
3027 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
3028 !lse_mask->mpls_tc && !lse_mask->mpls_label))
3029 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
3031 if (lse_mask->mpls_ttl) {
3032 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
3033 lse_key->mpls_ttl);
3034 if (err)
3035 return err;
3037 if (lse_mask->mpls_tc) {
3038 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
3039 lse_key->mpls_tc);
3040 if (err)
3041 return err;
3043 if (lse_mask->mpls_label) {
3044 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
3045 lse_key->mpls_label);
3046 if (err)
3047 return err;
3049 if (lse_mask->mpls_bos) {
3050 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
3051 lse_key->mpls_bos);
3052 if (err)
3053 return err;
3055 return 0;
3058 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
3059 struct flow_dissector_key_ip *key,
3060 struct flow_dissector_key_ip *mask)
3062 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
3063 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
3064 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
3065 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
3067 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
3068 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
3069 return -1;
3071 return 0;
3074 static int fl_dump_key_vlan(struct sk_buff *skb,
3075 int vlan_id_key, int vlan_prio_key,
3076 struct flow_dissector_key_vlan *vlan_key,
3077 struct flow_dissector_key_vlan *vlan_mask)
3079 int err;
3081 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
3082 return 0;
3083 if (vlan_mask->vlan_id) {
3084 err = nla_put_u16(skb, vlan_id_key,
3085 vlan_key->vlan_id);
3086 if (err)
3087 return err;
3089 if (vlan_mask->vlan_priority) {
3090 err = nla_put_u8(skb, vlan_prio_key,
3091 vlan_key->vlan_priority);
3092 if (err)
3093 return err;
3095 return 0;
3098 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
3099 u32 *flower_key, u32 *flower_mask,
3100 u32 flower_flag_bit, u32 dissector_flag_bit)
3102 if (dissector_mask & dissector_flag_bit) {
3103 *flower_mask |= flower_flag_bit;
3104 if (dissector_key & dissector_flag_bit)
3105 *flower_key |= flower_flag_bit;
3109 static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
3110 u32 flags_key, u32 flags_mask)
3112 int fl_key, fl_mask;
3113 __be32 _key, _mask;
3114 u32 key, mask;
3115 int err;
3117 if (encap) {
3118 fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
3119 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
3120 } else {
3121 fl_key = TCA_FLOWER_KEY_FLAGS;
3122 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
3125 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
3126 return 0;
3128 key = 0;
3129 mask = 0;
3131 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3132 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
3133 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3134 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
3135 FLOW_DIS_FIRST_FRAG);
3137 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3138 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
3139 FLOW_DIS_F_TUNNEL_CSUM);
3141 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3142 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
3143 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
3145 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3146 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
3148 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3149 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
3150 FLOW_DIS_F_TUNNEL_CRIT_OPT);
3152 _key = cpu_to_be32(key);
3153 _mask = cpu_to_be32(mask);
3155 err = nla_put(skb, fl_key, 4, &_key);
3156 if (err)
3157 return err;
3159 return nla_put(skb, fl_mask, 4, &_mask);
3162 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
3163 struct flow_dissector_key_enc_opts *enc_opts)
3165 struct geneve_opt *opt;
3166 struct nlattr *nest;
3167 int opt_off = 0;
3169 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
3170 if (!nest)
3171 goto nla_put_failure;
3173 while (enc_opts->len > opt_off) {
3174 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
3176 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
3177 opt->opt_class))
3178 goto nla_put_failure;
3179 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
3180 opt->type))
3181 goto nla_put_failure;
3182 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
3183 opt->length * 4, opt->opt_data))
3184 goto nla_put_failure;
3186 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
3188 nla_nest_end(skb, nest);
3189 return 0;
3191 nla_put_failure:
3192 nla_nest_cancel(skb, nest);
3193 return -EMSGSIZE;
3196 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
3197 struct flow_dissector_key_enc_opts *enc_opts)
3199 struct vxlan_metadata *md;
3200 struct nlattr *nest;
3202 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
3203 if (!nest)
3204 goto nla_put_failure;
3206 md = (struct vxlan_metadata *)&enc_opts->data[0];
3207 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
3208 goto nla_put_failure;
3210 nla_nest_end(skb, nest);
3211 return 0;
3213 nla_put_failure:
3214 nla_nest_cancel(skb, nest);
3215 return -EMSGSIZE;
3218 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
3219 struct flow_dissector_key_enc_opts *enc_opts)
3221 struct erspan_metadata *md;
3222 struct nlattr *nest;
3224 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
3225 if (!nest)
3226 goto nla_put_failure;
3228 md = (struct erspan_metadata *)&enc_opts->data[0];
3229 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
3230 goto nla_put_failure;
3232 if (md->version == 1 &&
3233 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
3234 goto nla_put_failure;
3236 if (md->version == 2 &&
3237 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
3238 md->u.md2.dir) ||
3239 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
3240 get_hwid(&md->u.md2))))
3241 goto nla_put_failure;
3243 nla_nest_end(skb, nest);
3244 return 0;
3246 nla_put_failure:
3247 nla_nest_cancel(skb, nest);
3248 return -EMSGSIZE;
3251 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
3252 struct flow_dissector_key_enc_opts *enc_opts)
3255 struct gtp_pdu_session_info *session_info;
3256 struct nlattr *nest;
3258 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
3259 if (!nest)
3260 goto nla_put_failure;
3262 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
3264 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
3265 session_info->pdu_type))
3266 goto nla_put_failure;
3268 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
3269 goto nla_put_failure;
3271 nla_nest_end(skb, nest);
3272 return 0;
3274 nla_put_failure:
3275 nla_nest_cancel(skb, nest);
3276 return -EMSGSIZE;
3279 static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
3280 struct flow_dissector_key_enc_opts *enc_opts)
3282 struct pfcp_metadata *md;
3283 struct nlattr *nest;
3285 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
3286 if (!nest)
3287 goto nla_put_failure;
3289 md = (struct pfcp_metadata *)&enc_opts->data[0];
3290 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
3291 goto nla_put_failure;
3293 if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
3294 md->seid, 0))
3295 goto nla_put_failure;
3297 nla_nest_end(skb, nest);
3298 return 0;
3300 nla_put_failure:
3301 nla_nest_cancel(skb, nest);
3302 return -EMSGSIZE;
3305 static int fl_dump_key_ct(struct sk_buff *skb,
3306 struct flow_dissector_key_ct *key,
3307 struct flow_dissector_key_ct *mask)
3309 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
3310 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
3311 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
3312 sizeof(key->ct_state)))
3313 goto nla_put_failure;
3315 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
3316 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
3317 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
3318 sizeof(key->ct_zone)))
3319 goto nla_put_failure;
3321 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
3322 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
3323 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
3324 sizeof(key->ct_mark)))
3325 goto nla_put_failure;
3327 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3328 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3329 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3330 sizeof(key->ct_labels)))
3331 goto nla_put_failure;
3333 return 0;
3335 nla_put_failure:
3336 return -EMSGSIZE;
3339 static int fl_dump_key_cfm(struct sk_buff *skb,
3340 struct flow_dissector_key_cfm *key,
3341 struct flow_dissector_key_cfm *mask)
3343 struct nlattr *opts;
3344 int err;
3345 u8 mdl;
3347 if (!memchr_inv(mask, 0, sizeof(*mask)))
3348 return 0;
3350 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
3351 if (!opts)
3352 return -EMSGSIZE;
3354 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
3355 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
3356 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
3357 if (err)
3358 goto err_cfm_opts;
3361 if (mask->opcode) {
3362 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
3363 if (err)
3364 goto err_cfm_opts;
3367 nla_nest_end(skb, opts);
3369 return 0;
3371 err_cfm_opts:
3372 nla_nest_cancel(skb, opts);
3373 return err;
3376 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3377 struct flow_dissector_key_enc_opts *enc_opts)
3379 struct nlattr *nest;
3380 int err;
3382 if (!enc_opts->len)
3383 return 0;
3385 nest = nla_nest_start_noflag(skb, enc_opt_type);
3386 if (!nest)
3387 goto nla_put_failure;
3389 switch (enc_opts->dst_opt_type) {
3390 case IP_TUNNEL_GENEVE_OPT_BIT:
3391 err = fl_dump_key_geneve_opt(skb, enc_opts);
3392 if (err)
3393 goto nla_put_failure;
3394 break;
3395 case IP_TUNNEL_VXLAN_OPT_BIT:
3396 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3397 if (err)
3398 goto nla_put_failure;
3399 break;
3400 case IP_TUNNEL_ERSPAN_OPT_BIT:
3401 err = fl_dump_key_erspan_opt(skb, enc_opts);
3402 if (err)
3403 goto nla_put_failure;
3404 break;
3405 case IP_TUNNEL_GTP_OPT_BIT:
3406 err = fl_dump_key_gtp_opt(skb, enc_opts);
3407 if (err)
3408 goto nla_put_failure;
3409 break;
3410 case IP_TUNNEL_PFCP_OPT_BIT:
3411 err = fl_dump_key_pfcp_opt(skb, enc_opts);
3412 if (err)
3413 goto nla_put_failure;
3414 break;
3415 default:
3416 goto nla_put_failure;
3418 nla_nest_end(skb, nest);
3419 return 0;
3421 nla_put_failure:
3422 nla_nest_cancel(skb, nest);
3423 return -EMSGSIZE;
3426 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3427 struct flow_dissector_key_enc_opts *key_opts,
3428 struct flow_dissector_key_enc_opts *msk_opts)
3430 int err;
3432 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3433 if (err)
3434 return err;
3436 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3439 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3440 struct fl_flow_key *key, struct fl_flow_key *mask)
3442 if (mask->meta.ingress_ifindex) {
3443 struct net_device *dev;
3445 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3446 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3447 goto nla_put_failure;
3450 if (fl_dump_key_val(skb, &key->meta.l2_miss,
3451 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
3452 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
3453 goto nla_put_failure;
3455 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3456 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3457 sizeof(key->eth.dst)) ||
3458 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3459 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3460 sizeof(key->eth.src)) ||
3461 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3462 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3463 sizeof(key->basic.n_proto)))
3464 goto nla_put_failure;
3466 if (mask->num_of_vlans.num_of_vlans) {
3467 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3468 goto nla_put_failure;
3471 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3472 goto nla_put_failure;
3474 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3475 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3476 goto nla_put_failure;
3478 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3479 TCA_FLOWER_KEY_CVLAN_PRIO,
3480 &key->cvlan, &mask->cvlan) ||
3481 (mask->cvlan.vlan_tpid &&
3482 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3483 key->cvlan.vlan_tpid)))
3484 goto nla_put_failure;
3486 if (mask->basic.n_proto) {
3487 if (mask->cvlan.vlan_eth_type) {
3488 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3489 key->basic.n_proto))
3490 goto nla_put_failure;
3491 } else if (mask->vlan.vlan_eth_type) {
3492 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3493 key->vlan.vlan_eth_type))
3494 goto nla_put_failure;
3498 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3499 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3500 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3501 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3502 sizeof(key->basic.ip_proto)) ||
3503 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3504 goto nla_put_failure;
3506 if (mask->pppoe.session_id) {
3507 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3508 key->pppoe.session_id))
3509 goto nla_put_failure;
3511 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3512 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3513 key->pppoe.ppp_proto))
3514 goto nla_put_failure;
3517 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3518 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3519 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3520 sizeof(key->ipv4.src)) ||
3521 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3522 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3523 sizeof(key->ipv4.dst))))
3524 goto nla_put_failure;
3525 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3526 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3527 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3528 sizeof(key->ipv6.src)) ||
3529 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3530 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3531 sizeof(key->ipv6.dst))))
3532 goto nla_put_failure;
3534 if (key->basic.ip_proto == IPPROTO_TCP &&
3535 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3536 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3537 sizeof(key->tp.src)) ||
3538 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3539 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3540 sizeof(key->tp.dst)) ||
3541 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3542 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3543 sizeof(key->tcp.flags))))
3544 goto nla_put_failure;
3545 else if (key->basic.ip_proto == IPPROTO_UDP &&
3546 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3547 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3548 sizeof(key->tp.src)) ||
3549 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3550 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3551 sizeof(key->tp.dst))))
3552 goto nla_put_failure;
3553 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3554 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3555 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3556 sizeof(key->tp.src)) ||
3557 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3558 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3559 sizeof(key->tp.dst))))
3560 goto nla_put_failure;
3561 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3562 key->basic.ip_proto == IPPROTO_ICMP &&
3563 (fl_dump_key_val(skb, &key->icmp.type,
3564 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3565 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3566 sizeof(key->icmp.type)) ||
3567 fl_dump_key_val(skb, &key->icmp.code,
3568 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3569 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3570 sizeof(key->icmp.code))))
3571 goto nla_put_failure;
3572 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3573 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3574 (fl_dump_key_val(skb, &key->icmp.type,
3575 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3576 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3577 sizeof(key->icmp.type)) ||
3578 fl_dump_key_val(skb, &key->icmp.code,
3579 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3580 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3581 sizeof(key->icmp.code))))
3582 goto nla_put_failure;
3583 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3584 key->basic.n_proto == htons(ETH_P_RARP)) &&
3585 (fl_dump_key_val(skb, &key->arp.sip,
3586 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3587 TCA_FLOWER_KEY_ARP_SIP_MASK,
3588 sizeof(key->arp.sip)) ||
3589 fl_dump_key_val(skb, &key->arp.tip,
3590 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3591 TCA_FLOWER_KEY_ARP_TIP_MASK,
3592 sizeof(key->arp.tip)) ||
3593 fl_dump_key_val(skb, &key->arp.op,
3594 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3595 TCA_FLOWER_KEY_ARP_OP_MASK,
3596 sizeof(key->arp.op)) ||
3597 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3598 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3599 sizeof(key->arp.sha)) ||
3600 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3601 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3602 sizeof(key->arp.tha))))
3603 goto nla_put_failure;
3604 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3605 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3606 TCA_FLOWER_KEY_L2TPV3_SID,
3607 &mask->l2tpv3.session_id,
3608 TCA_FLOWER_UNSPEC,
3609 sizeof(key->l2tpv3.session_id)))
3610 goto nla_put_failure;
3612 if (key->ipsec.spi &&
3613 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI,
3614 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
3615 sizeof(key->ipsec.spi)))
3616 goto nla_put_failure;
3618 if ((key->basic.ip_proto == IPPROTO_TCP ||
3619 key->basic.ip_proto == IPPROTO_UDP ||
3620 key->basic.ip_proto == IPPROTO_SCTP) &&
3621 fl_dump_key_port_range(skb, key, mask))
3622 goto nla_put_failure;
3624 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3625 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3626 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3627 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3628 sizeof(key->enc_ipv4.src)) ||
3629 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3630 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3631 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3632 sizeof(key->enc_ipv4.dst))))
3633 goto nla_put_failure;
3634 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3635 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3636 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3637 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3638 sizeof(key->enc_ipv6.src)) ||
3639 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3640 TCA_FLOWER_KEY_ENC_IPV6_DST,
3641 &mask->enc_ipv6.dst,
3642 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3643 sizeof(key->enc_ipv6.dst))))
3644 goto nla_put_failure;
3646 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3647 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3648 sizeof(key->enc_key_id)) ||
3649 fl_dump_key_val(skb, &key->enc_tp.src,
3650 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3651 &mask->enc_tp.src,
3652 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3653 sizeof(key->enc_tp.src)) ||
3654 fl_dump_key_val(skb, &key->enc_tp.dst,
3655 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3656 &mask->enc_tp.dst,
3657 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3658 sizeof(key->enc_tp.dst)) ||
3659 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3660 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3661 goto nla_put_failure;
3663 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3664 goto nla_put_failure;
3666 if (fl_dump_key_flags(skb, false, key->control.flags,
3667 mask->control.flags))
3668 goto nla_put_failure;
3670 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3671 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3672 sizeof(key->hash.hash)))
3673 goto nla_put_failure;
3675 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
3676 goto nla_put_failure;
3678 if (fl_dump_key_flags(skb, true, key->enc_control.flags,
3679 mask->enc_control.flags))
3680 goto nla_put_failure;
3682 return 0;
3684 nla_put_failure:
3685 return -EMSGSIZE;
3688 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3689 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3691 struct cls_fl_filter *f = fh;
3692 struct nlattr *nest;
3693 struct fl_flow_key *key, *mask;
3694 bool skip_hw;
3696 if (!f)
3697 return skb->len;
3699 t->tcm_handle = f->handle;
3701 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3702 if (!nest)
3703 goto nla_put_failure;
3705 spin_lock(&tp->lock);
3707 if (f->res.classid &&
3708 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3709 goto nla_put_failure_locked;
3711 key = &f->key;
3712 mask = &f->mask->key;
3713 skip_hw = tc_skip_hw(f->flags);
3715 if (fl_dump_key(skb, net, key, mask))
3716 goto nla_put_failure_locked;
3718 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3719 goto nla_put_failure_locked;
3721 spin_unlock(&tp->lock);
3723 if (!skip_hw)
3724 fl_hw_update_stats(tp, f, rtnl_held);
3726 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3727 goto nla_put_failure;
3729 if (tcf_exts_dump(skb, &f->exts))
3730 goto nla_put_failure;
3732 nla_nest_end(skb, nest);
3734 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3735 goto nla_put_failure;
3737 return skb->len;
3739 nla_put_failure_locked:
3740 spin_unlock(&tp->lock);
3741 nla_put_failure:
3742 nla_nest_cancel(skb, nest);
3743 return -1;
3746 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3747 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3749 struct cls_fl_filter *f = fh;
3750 struct nlattr *nest;
3751 bool skip_hw;
3753 if (!f)
3754 return skb->len;
3756 t->tcm_handle = f->handle;
3758 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3759 if (!nest)
3760 goto nla_put_failure;
3762 spin_lock(&tp->lock);
3764 skip_hw = tc_skip_hw(f->flags);
3766 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3767 goto nla_put_failure_locked;
3769 spin_unlock(&tp->lock);
3771 if (!skip_hw)
3772 fl_hw_update_stats(tp, f, rtnl_held);
3774 if (tcf_exts_terse_dump(skb, &f->exts))
3775 goto nla_put_failure;
3777 nla_nest_end(skb, nest);
3779 return skb->len;
3781 nla_put_failure_locked:
3782 spin_unlock(&tp->lock);
3783 nla_put_failure:
3784 nla_nest_cancel(skb, nest);
3785 return -1;
3788 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3790 struct fl_flow_tmplt *tmplt = tmplt_priv;
3791 struct fl_flow_key *key, *mask;
3792 struct nlattr *nest;
3794 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3795 if (!nest)
3796 goto nla_put_failure;
3798 key = &tmplt->dummy_key;
3799 mask = &tmplt->mask;
3801 if (fl_dump_key(skb, net, key, mask))
3802 goto nla_put_failure;
3804 nla_nest_end(skb, nest);
3806 return skb->len;
3808 nla_put_failure:
3809 nla_nest_cancel(skb, nest);
3810 return -EMSGSIZE;
3813 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3814 unsigned long base)
3816 struct cls_fl_filter *f = fh;
3818 tc_cls_bind_class(classid, cl, q, &f->res, base);
3821 static bool fl_delete_empty(struct tcf_proto *tp)
3823 struct cls_fl_head *head = fl_head_dereference(tp);
3825 spin_lock(&tp->lock);
3826 tp->deleting = idr_is_empty(&head->handle_idr);
3827 spin_unlock(&tp->lock);
3829 return tp->deleting;
3832 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3833 .kind = "flower",
3834 .classify = fl_classify,
3835 .init = fl_init,
3836 .destroy = fl_destroy,
3837 .get = fl_get,
3838 .put = fl_put,
3839 .change = fl_change,
3840 .delete = fl_delete,
3841 .delete_empty = fl_delete_empty,
3842 .walk = fl_walk,
3843 .reoffload = fl_reoffload,
3844 .hw_add = fl_hw_add,
3845 .hw_del = fl_hw_del,
3846 .dump = fl_dump,
3847 .terse_dump = fl_terse_dump,
3848 .bind_class = fl_bind_class,
3849 .tmplt_create = fl_tmplt_create,
3850 .tmplt_destroy = fl_tmplt_destroy,
3851 .tmplt_reoffload = fl_tmplt_reoffload,
3852 .tmplt_dump = fl_tmplt_dump,
3853 .get_exts = fl_get_exts,
3854 .owner = THIS_MODULE,
3855 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
3857 MODULE_ALIAS_NET_CLS("flower");
3859 static int __init cls_fl_init(void)
3861 return register_tcf_proto_ops(&cls_fl_ops);
3864 static void __exit cls_fl_exit(void)
3866 unregister_tcf_proto_ops(&cls_fl_ops);
3869 module_init(cls_fl_init);
3870 module_exit(cls_fl_exit);
3872 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3873 MODULE_DESCRIPTION("Flower classifier");
3874 MODULE_LICENSE("GPL v2");