// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
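
/*
 * Matching scheme (summary of the code below): each mask keeps its own
 * rhashtable of filters keyed on the *masked* key. Classification ANDs the
 * dissected packet key with the mask (see fl_set_masked_key()) and performs
 * an exact-match hash lookup per mask. The __aligned() above lets that AND
 * run long-at-a-time.
 */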
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};
struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};
struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};
struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
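
/*
 * Example: with sizeof(long) == 8, if the first non-zero mask byte is at
 * offset 3 and the last at offset 9, the range becomes start = 0 and
 * end = 16, so the masked-key operations below touch exactly two longs.
 */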
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;

	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
	min_val = htons(filter->key.tp_range.tp_min.dst);
	max_val = htons(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.dst) < min_val ||
		    htons(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}
static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
	min_val = htons(filter->key.tp_range.tp_min.src);
	max_val = htons(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.src) < min_val ||
		    htons(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}
static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}
static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}
static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}
static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}
static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}
static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}
static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}
static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused,
			      cls_flower.stats.used_hw_stats,
			      cls_flower.stats.used_hw_stats_valid);
}
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}
static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}
static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
};
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};
static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};
static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};
static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    htons(key->tp_range.tp_max.dst) <=
	    htons(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    htons(key->tp_range.tp_max.src) <=
	    htons(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}
static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}
static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}
static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);

	return sizeof(*md);
}
*nla
, struct fl_flow_key
*key
,
1021 int depth
, int option_len
,
1022 struct netlink_ext_ack
*extack
)
1024 struct nlattr
*tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX
+ 1];
1025 struct erspan_metadata
*md
;
1028 md
= (struct erspan_metadata
*)&key
->enc_opts
.data
[key
->enc_opts
.len
];
1029 memset(md
, 0xff, sizeof(*md
));
1035 if (nla_type(nla
) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN
) {
1036 NL_SET_ERR_MSG(extack
, "Non-erspan option type for mask");
1040 err
= nla_parse_nested(tb
, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX
, nla
,
1041 erspan_opt_policy
, extack
);
1045 if (!option_len
&& !tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER
]) {
1046 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option ver");
1050 if (tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER
])
1051 md
->version
= nla_get_u8(tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER
]);
1053 if (md
->version
== 1) {
1054 if (!option_len
&& !tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX
]) {
1055 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option index");
1058 if (tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX
]) {
1059 nla
= tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX
];
1060 md
->u
.index
= nla_get_be32(nla
);
1062 } else if (md
->version
== 2) {
1063 if (!option_len
&& (!tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR
] ||
1064 !tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID
])) {
1065 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option dir or hwid");
1068 if (tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR
]) {
1069 nla
= tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR
];
1070 md
->u
.md2
.dir
= nla_get_u8(nla
);
1072 if (tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID
]) {
1073 nla
= tb
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID
];
1074 set_hwid(&md
->u
.md2
, nla_get_u8(nla
));
1077 NL_SET_ERR_MSG(extack
, "Tunnel key erspan option ver is incorrect");
1084 static int fl_set_enc_opt(struct nlattr
**tb
, struct fl_flow_key
*key
,
1085 struct fl_flow_key
*mask
,
1086 struct netlink_ext_ack
*extack
)
1088 const struct nlattr
*nla_enc_key
, *nla_opt_key
, *nla_opt_msk
= NULL
;
1089 int err
, option_len
, key_depth
, msk_depth
= 0;
1091 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS
],
1092 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
1093 enc_opts_policy
, extack
);
1097 nla_enc_key
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS
]);
1099 if (tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]) {
1100 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
],
1101 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
1102 enc_opts_policy
, extack
);
1106 nla_opt_msk
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
1107 msk_depth
= nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
1110 nla_for_each_attr(nla_opt_key
, nla_enc_key
,
1111 nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS
]), key_depth
) {
1112 switch (nla_type(nla_opt_key
)) {
1113 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE
:
1114 if (key
->enc_opts
.dst_opt_type
&&
1115 key
->enc_opts
.dst_opt_type
!= TUNNEL_GENEVE_OPT
) {
1116 NL_SET_ERR_MSG(extack
, "Duplicate type for geneve options");
1120 key
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
1121 option_len
= fl_set_geneve_opt(nla_opt_key
, key
,
1122 key_depth
, option_len
,
1127 key
->enc_opts
.len
+= option_len
;
1128 /* At the same time we need to parse through the mask
1129 * in order to verify exact and mask attribute lengths.
1131 mask
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
1132 option_len
= fl_set_geneve_opt(nla_opt_msk
, mask
,
1133 msk_depth
, option_len
,
1138 mask
->enc_opts
.len
+= option_len
;
1139 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1140 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1145 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1147 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN
:
1148 if (key
->enc_opts
.dst_opt_type
) {
1149 NL_SET_ERR_MSG(extack
, "Duplicate type for vxlan options");
1153 key
->enc_opts
.dst_opt_type
= TUNNEL_VXLAN_OPT
;
1154 option_len
= fl_set_vxlan_opt(nla_opt_key
, key
,
1155 key_depth
, option_len
,
1160 key
->enc_opts
.len
+= option_len
;
1161 /* At the same time we need to parse through the mask
1162 * in order to verify exact and mask attribute lengths.
1164 mask
->enc_opts
.dst_opt_type
= TUNNEL_VXLAN_OPT
;
1165 option_len
= fl_set_vxlan_opt(nla_opt_msk
, mask
,
1166 msk_depth
, option_len
,
1171 mask
->enc_opts
.len
+= option_len
;
1172 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1173 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1178 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1180 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN
:
1181 if (key
->enc_opts
.dst_opt_type
) {
1182 NL_SET_ERR_MSG(extack
, "Duplicate type for erspan options");
1186 key
->enc_opts
.dst_opt_type
= TUNNEL_ERSPAN_OPT
;
1187 option_len
= fl_set_erspan_opt(nla_opt_key
, key
,
1188 key_depth
, option_len
,
1193 key
->enc_opts
.len
+= option_len
;
1194 /* At the same time we need to parse through the mask
1195 * in order to verify exact and mask attribute lengths.
1197 mask
->enc_opts
.dst_opt_type
= TUNNEL_ERSPAN_OPT
;
1198 option_len
= fl_set_erspan_opt(nla_opt_msk
, mask
,
1199 msk_depth
, option_len
,
1204 mask
->enc_opts
.len
+= option_len
;
1205 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1206 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1211 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1214 NL_SET_ERR_MSG(extack
, "Unknown tunnel option type");
static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags,
				       &mask->control.flags, extack);

	return ret;
}
static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))			\

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CT, ct);

	skb_flow_dissector_init(dissector, keys, cnt);
}
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_range.tp_min.dst &&
	     newmask->key.tp_range.tp_max.dst) ||
	    (newmask->key.tp_range.tp_min.src &&
	     newmask->key.tp_range.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}
*fnew
,
1723 struct cls_fl_filter
*fold
,
1726 struct fl_flow_mask
*mask
= fnew
->mask
;
1729 err
= rhashtable_lookup_insert_fast(&mask
->ht
,
1731 mask
->filter_ht_params
);
1734 /* It is okay if filter with same key exists when
1737 return fold
&& err
== -EEXIST
? 0 : err
;
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	unsigned long id = arg->cookie, tmp;
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
		/* don't return filters that are being deleted */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->count++;
	}
	arg->cookie = id;
}
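
/* Return the next filter on the hw_filters list after @f, taking a
 * reference on it under tp->lock; NULL when the list is exhausted. On an
 * "add" reoffload, filters already marked deleted are skipped.
 */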
static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}
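
/* Replay all hardware-offloaded filters to a single block callback,
 * either installing them (new callback registered) or destroying them
 * (callback being unregistered).
 */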
static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
		tc_cleanup_flow_action(&cls_flower.rule->action);
		kfree(cls_flower.rule);

		if (err) {
			__fl_put(f);
			return err;
		}
next_flow:
		__fl_put(f);
	}

	return 0;
}
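
/* fl_hw_add()/fl_hw_del() keep head->hw_filters in sync with what has
 * actually been offloaded, so that fl_reoffload() only replays filters
 * present in hardware. Both are called from the offload core with
 * type_data pointing at the flow_cls_offload whose cookie identifies the
 * filter.
 */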
static void fl_hw_add(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
}

static void fl_hw_del(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;

	spin_lock(&tp->lock);
	if (!list_empty(&f->hw_list))
		list_del_init(&f->hw_list);
	spin_unlock(&tp->lock);
}

static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
	kfree(cls_flower.rule);

	return 0;
}

static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
}
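
/* Parse a chain template: the netlink key/mask attributes are stored in
 * the template and offered to drivers as a creation-time hint via
 * fl_hw_create_tmplt().
 */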
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}
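
/* Dump one key/mask pair. A mask of all zeroes means "not matched on" and
 * is skipped entirely; passing TCA_FLOWER_UNSPEC as mask_type suppresses
 * the separate mask attribute (used for always-exact-match fields).
 */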
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
			    TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
			    TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
			    TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
			    TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.src)))
		return -1;

	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
				 struct flow_dissector_key_enc_opts *enc_opts)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct vxlan_metadata *)&enc_opts->data[0];
	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_erspan_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct erspan_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto nla_put_failure;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto nla_put_failure;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
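
/* Dump tunnel option key or mask as a nested attribute, dispatching on
 * the tunnel type the options were parsed for.
 */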
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_VXLAN_OPT:
		err = fl_dump_key_vxlan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_ERSPAN_OPT:
		err = fl_dump_key_erspan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}
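
/* Dump a complete flow key/mask pair back to userspace; the inverse of
 * fl_set_key(). Only fields with a non-zero mask are emitted.
 */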
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}
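
/* Called by cls_api to check whether the instance can be torn down.
 * tp->deleting is set under tp->lock so that a concurrent fl_change()
 * observes the flag and backs off with -EAGAIN instead of inserting into
 * a dying instance.
 */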
static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};
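
/* Example usage from userspace (a sketch; exact syntax depends on the
 * installed iproute2 version, and "eth0" is a placeholder device):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 skip_sw action drop
 *
 * skip_sw/skip_hw correspond to the TCA_FLOWER_FLAGS handled above.
 */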
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");