// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
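/* Because the key struct is long-aligned (see __aligned() above),
 * fl_set_masked_key() and fl_mask_fits_tmplt() below can walk it one long
 * at a time instead of byte by byte.
 */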
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
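/* Compute the smallest byte range of the key that contains any non-zero mask
 * bits, rounded out to long boundaries so the masked compare and hashing
 * below never touch a partial word.
 */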
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;

	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
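/* Port ranges cannot be expressed as a simple bitmask, so filters that match
 * on a min/max port pair are kept on the mask's filter list and compared one
 * by one before the regular hash lookup is attempted.
 */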
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
	min_val = htons(filter->key.tp_range.tp_min.dst);
	max_val = htons(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.dst) < min_val ||
		    htons(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
	min_val = htons(filter->key.tp_range.tp_min.src);
	max_val = htons(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.src) < min_val ||
		    htons(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
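/* Classification walks the masks in creation order: for each mask the skb is
 * dissected into just the fields that mask cares about, masked, and looked up
 * in that mask's hash table. The first match that is not skip_sw wins.
 */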
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}
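/* Mask lifetime: each filter holds one reference on its mask (see
 * fl_mask_put() above); the last put unlinks the mask from the head and frees
 * it via RCU-deferred work so concurrent readers stay safe.
 */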
static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.drops,
			      cls_flower.stats.lastused,
			      cls_flower.stats.used_hw_stats,
			      cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
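/* fl_destroy() takes a module reference before queueing the RCU work above so
 * that cls_flower cannot be unloaded until fl_destroy_sleepable() has finished
 * destroying the mask hashtable.
 */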
static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
};
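/* Most match fields come in value/mask attribute pairs (e.g. ..._KEY_ETH_DST
 * and ..._KEY_ETH_DST_MASK); when the mask attribute is absent,
 * fl_set_key_val() below defaults to an exact (all-ones) match. As a rough
 * illustration (device name is only an example), a command like
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * exercises TCA_FLOWER_KEY_IP_PROTO and TCA_FLOWER_KEY_TCP_DST.
 */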
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
};
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
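/* Note the convention implemented above: a value attribute without a
 * corresponding mask attribute means "match this field exactly", hence the
 * memset of the mask to all ones.
 */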
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    htons(key->tp_range.tp_max.dst) <=
	    htons(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    htons(key->tp_range.tp_max.src) <=
	    htons(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}
		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}
static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}
**tb
, struct fl_flow_key
*key
,
1252 struct fl_flow_key
*mask
,
1253 struct netlink_ext_ack
*extack
)
1255 const struct nlattr
*nla_enc_key
, *nla_opt_key
, *nla_opt_msk
= NULL
;
1256 int err
, option_len
, key_depth
, msk_depth
= 0;
1258 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS
],
1259 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
1260 enc_opts_policy
, extack
);
1264 nla_enc_key
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS
]);
1266 if (tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]) {
1267 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
],
1268 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
1269 enc_opts_policy
, extack
);
1273 nla_opt_msk
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
1274 msk_depth
= nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
1277 nla_for_each_attr(nla_opt_key
, nla_enc_key
,
1278 nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS
]), key_depth
) {
1279 switch (nla_type(nla_opt_key
)) {
1280 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE
:
1281 if (key
->enc_opts
.dst_opt_type
&&
1282 key
->enc_opts
.dst_opt_type
!= TUNNEL_GENEVE_OPT
) {
1283 NL_SET_ERR_MSG(extack
, "Duplicate type for geneve options");
1287 key
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
1288 option_len
= fl_set_geneve_opt(nla_opt_key
, key
,
1289 key_depth
, option_len
,
1294 key
->enc_opts
.len
+= option_len
;
1295 /* At the same time we need to parse through the mask
1296 * in order to verify exact and mask attribute lengths.
1298 mask
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
1299 option_len
= fl_set_geneve_opt(nla_opt_msk
, mask
,
1300 msk_depth
, option_len
,
1305 mask
->enc_opts
.len
+= option_len
;
1306 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1307 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1312 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1314 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN
:
1315 if (key
->enc_opts
.dst_opt_type
) {
1316 NL_SET_ERR_MSG(extack
, "Duplicate type for vxlan options");
1320 key
->enc_opts
.dst_opt_type
= TUNNEL_VXLAN_OPT
;
1321 option_len
= fl_set_vxlan_opt(nla_opt_key
, key
,
1322 key_depth
, option_len
,
1327 key
->enc_opts
.len
+= option_len
;
1328 /* At the same time we need to parse through the mask
1329 * in order to verify exact and mask attribute lengths.
1331 mask
->enc_opts
.dst_opt_type
= TUNNEL_VXLAN_OPT
;
1332 option_len
= fl_set_vxlan_opt(nla_opt_msk
, mask
,
1333 msk_depth
, option_len
,
1338 mask
->enc_opts
.len
+= option_len
;
1339 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1340 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1345 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1347 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN
:
1348 if (key
->enc_opts
.dst_opt_type
) {
1349 NL_SET_ERR_MSG(extack
, "Duplicate type for erspan options");
1353 key
->enc_opts
.dst_opt_type
= TUNNEL_ERSPAN_OPT
;
1354 option_len
= fl_set_erspan_opt(nla_opt_key
, key
,
1355 key_depth
, option_len
,
1360 key
->enc_opts
.len
+= option_len
;
1361 /* At the same time we need to parse through the mask
1362 * in order to verify exact and mask attribute lengths.
1364 mask
->enc_opts
.dst_opt_type
= TUNNEL_ERSPAN_OPT
;
1365 option_len
= fl_set_erspan_opt(nla_opt_msk
, mask
,
1366 msk_depth
, option_len
,
1371 mask
->enc_opts
.len
+= option_len
;
1372 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
1373 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
1378 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
1381 NL_SET_ERR_MSG(extack
, "Unknown tunnel option type");
static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}
*net
, struct nlattr
**tb
,
1435 struct fl_flow_key
*key
, struct fl_flow_key
*mask
,
1436 struct netlink_ext_ack
*extack
)
1441 if (tb
[TCA_FLOWER_INDEV
]) {
1442 int err
= tcf_change_indev(net
, tb
[TCA_FLOWER_INDEV
], extack
);
1445 key
->meta
.ingress_ifindex
= err
;
1446 mask
->meta
.ingress_ifindex
= 0xffffffff;
1449 fl_set_key_val(tb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
1450 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
1451 sizeof(key
->eth
.dst
));
1452 fl_set_key_val(tb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
1453 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
1454 sizeof(key
->eth
.src
));
1456 if (tb
[TCA_FLOWER_KEY_ETH_TYPE
]) {
1457 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_ETH_TYPE
]);
1459 if (eth_type_vlan(ethertype
)) {
1460 fl_set_key_vlan(tb
, ethertype
, TCA_FLOWER_KEY_VLAN_ID
,
1461 TCA_FLOWER_KEY_VLAN_PRIO
, &key
->vlan
,
1464 if (tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]) {
1465 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]);
1466 if (eth_type_vlan(ethertype
)) {
1467 fl_set_key_vlan(tb
, ethertype
,
1468 TCA_FLOWER_KEY_CVLAN_ID
,
1469 TCA_FLOWER_KEY_CVLAN_PRIO
,
1470 &key
->cvlan
, &mask
->cvlan
);
1471 fl_set_key_val(tb
, &key
->basic
.n_proto
,
1472 TCA_FLOWER_KEY_CVLAN_ETH_TYPE
,
1473 &mask
->basic
.n_proto
,
1475 sizeof(key
->basic
.n_proto
));
1477 key
->basic
.n_proto
= ethertype
;
1478 mask
->basic
.n_proto
= cpu_to_be16(~0);
1482 key
->basic
.n_proto
= ethertype
;
1483 mask
->basic
.n_proto
= cpu_to_be16(~0);
1487 if (key
->basic
.n_proto
== htons(ETH_P_IP
) ||
1488 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) {
1489 fl_set_key_val(tb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
1490 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
1491 sizeof(key
->basic
.ip_proto
));
1492 fl_set_key_ip(tb
, false, &key
->ip
, &mask
->ip
);
1495 if (tb
[TCA_FLOWER_KEY_IPV4_SRC
] || tb
[TCA_FLOWER_KEY_IPV4_DST
]) {
1496 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
1497 mask
->control
.addr_type
= ~0;
1498 fl_set_key_val(tb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
1499 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
1500 sizeof(key
->ipv4
.src
));
1501 fl_set_key_val(tb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
1502 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
1503 sizeof(key
->ipv4
.dst
));
1504 } else if (tb
[TCA_FLOWER_KEY_IPV6_SRC
] || tb
[TCA_FLOWER_KEY_IPV6_DST
]) {
1505 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
1506 mask
->control
.addr_type
= ~0;
1507 fl_set_key_val(tb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
1508 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
1509 sizeof(key
->ipv6
.src
));
1510 fl_set_key_val(tb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
1511 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
1512 sizeof(key
->ipv6
.dst
));
1515 if (key
->basic
.ip_proto
== IPPROTO_TCP
) {
1516 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
1517 &mask
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC_MASK
,
1518 sizeof(key
->tp
.src
));
1519 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
1520 &mask
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST_MASK
,
1521 sizeof(key
->tp
.dst
));
1522 fl_set_key_val(tb
, &key
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS
,
1523 &mask
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS_MASK
,
1524 sizeof(key
->tcp
.flags
));
1525 } else if (key
->basic
.ip_proto
== IPPROTO_UDP
) {
1526 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
1527 &mask
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC_MASK
,
1528 sizeof(key
->tp
.src
));
1529 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
1530 &mask
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST_MASK
,
1531 sizeof(key
->tp
.dst
));
1532 } else if (key
->basic
.ip_proto
== IPPROTO_SCTP
) {
1533 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC
,
1534 &mask
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC_MASK
,
1535 sizeof(key
->tp
.src
));
1536 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST
,
1537 &mask
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST_MASK
,
1538 sizeof(key
->tp
.dst
));
1539 } else if (key
->basic
.n_proto
== htons(ETH_P_IP
) &&
1540 key
->basic
.ip_proto
== IPPROTO_ICMP
) {
1541 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV4_TYPE
,
1543 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
,
1544 sizeof(key
->icmp
.type
));
1545 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV4_CODE
,
1547 TCA_FLOWER_KEY_ICMPV4_CODE_MASK
,
1548 sizeof(key
->icmp
.code
));
1549 } else if (key
->basic
.n_proto
== htons(ETH_P_IPV6
) &&
1550 key
->basic
.ip_proto
== IPPROTO_ICMPV6
) {
1551 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV6_TYPE
,
1553 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
,
1554 sizeof(key
->icmp
.type
));
1555 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV6_CODE
,
1557 TCA_FLOWER_KEY_ICMPV6_CODE_MASK
,
1558 sizeof(key
->icmp
.code
));
1559 } else if (key
->basic
.n_proto
== htons(ETH_P_MPLS_UC
) ||
1560 key
->basic
.n_proto
== htons(ETH_P_MPLS_MC
)) {
1561 ret
= fl_set_key_mpls(tb
, &key
->mpls
, &mask
->mpls
, extack
);
1564 } else if (key
->basic
.n_proto
== htons(ETH_P_ARP
) ||
1565 key
->basic
.n_proto
== htons(ETH_P_RARP
)) {
1566 fl_set_key_val(tb
, &key
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP
,
1567 &mask
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP_MASK
,
1568 sizeof(key
->arp
.sip
));
1569 fl_set_key_val(tb
, &key
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP
,
1570 &mask
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP_MASK
,
1571 sizeof(key
->arp
.tip
));
1572 fl_set_key_val(tb
, &key
->arp
.op
, TCA_FLOWER_KEY_ARP_OP
,
1573 &mask
->arp
.op
, TCA_FLOWER_KEY_ARP_OP_MASK
,
1574 sizeof(key
->arp
.op
));
1575 fl_set_key_val(tb
, key
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA
,
1576 mask
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA_MASK
,
1577 sizeof(key
->arp
.sha
));
1578 fl_set_key_val(tb
, key
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA
,
1579 mask
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA_MASK
,
1580 sizeof(key
->arp
.tha
));
1583 if (key
->basic
.ip_proto
== IPPROTO_TCP
||
1584 key
->basic
.ip_proto
== IPPROTO_UDP
||
1585 key
->basic
.ip_proto
== IPPROTO_SCTP
) {
1586 ret
= fl_set_key_port_range(tb
, key
, mask
, extack
);
1591 if (tb
[TCA_FLOWER_KEY_ENC_IPV4_SRC
] ||
1592 tb
[TCA_FLOWER_KEY_ENC_IPV4_DST
]) {
1593 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
1594 mask
->enc_control
.addr_type
= ~0;
1595 fl_set_key_val(tb
, &key
->enc_ipv4
.src
,
1596 TCA_FLOWER_KEY_ENC_IPV4_SRC
,
1597 &mask
->enc_ipv4
.src
,
1598 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
,
1599 sizeof(key
->enc_ipv4
.src
));
1600 fl_set_key_val(tb
, &key
->enc_ipv4
.dst
,
1601 TCA_FLOWER_KEY_ENC_IPV4_DST
,
1602 &mask
->enc_ipv4
.dst
,
1603 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
,
1604 sizeof(key
->enc_ipv4
.dst
));
1607 if (tb
[TCA_FLOWER_KEY_ENC_IPV6_SRC
] ||
1608 tb
[TCA_FLOWER_KEY_ENC_IPV6_DST
]) {
1609 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
1610 mask
->enc_control
.addr_type
= ~0;
1611 fl_set_key_val(tb
, &key
->enc_ipv6
.src
,
1612 TCA_FLOWER_KEY_ENC_IPV6_SRC
,
1613 &mask
->enc_ipv6
.src
,
1614 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
,
1615 sizeof(key
->enc_ipv6
.src
));
1616 fl_set_key_val(tb
, &key
->enc_ipv6
.dst
,
1617 TCA_FLOWER_KEY_ENC_IPV6_DST
,
1618 &mask
->enc_ipv6
.dst
,
1619 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
,
1620 sizeof(key
->enc_ipv6
.dst
));
1623 fl_set_key_val(tb
, &key
->enc_key_id
.keyid
, TCA_FLOWER_KEY_ENC_KEY_ID
,
1624 &mask
->enc_key_id
.keyid
, TCA_FLOWER_UNSPEC
,
1625 sizeof(key
->enc_key_id
.keyid
));
1627 fl_set_key_val(tb
, &key
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
,
1628 &mask
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
,
1629 sizeof(key
->enc_tp
.src
));
1631 fl_set_key_val(tb
, &key
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT
,
1632 &mask
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
,
1633 sizeof(key
->enc_tp
.dst
));
1635 fl_set_key_ip(tb
, true, &key
->enc_ip
, &mask
->enc_ip
);
1637 fl_set_key_val(tb
, &key
->hash
.hash
, TCA_FLOWER_KEY_HASH
,
1638 &mask
->hash
.hash
, TCA_FLOWER_KEY_HASH_MASK
,
1639 sizeof(key
->hash
.hash
));
1641 if (tb
[TCA_FLOWER_KEY_ENC_OPTS
]) {
1642 ret
= fl_set_enc_opt(tb
, key
, mask
, extack
);
1647 ret
= fl_set_key_ct(tb
, &key
->ct
, &mask
->ct
, extack
);
1651 if (tb
[TCA_FLOWER_KEY_FLAGS
])
1652 ret
= fl_set_key_flags(tb
, &key
->control
.flags
,
1653 &mask
->control
.flags
, extack
);
static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}
1684 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1686 #define FL_KEY_IS_MASKED(mask, member) \
1687 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1688 0, FL_KEY_MEMBER_SIZE(member)) \
1690 #define FL_KEY_SET(keys, cnt, id, member) \
1692 keys[cnt].key_id = id; \
1693 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1697 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
1699 if (FL_KEY_IS_MASKED(mask, member)) \
1700 FL_KEY_SET(keys, cnt, id, member); \
1703 static void fl_init_dissector(struct flow_dissector
*dissector
,
1704 struct fl_flow_key
*mask
)
1706 struct flow_dissector_key keys
[FLOW_DISSECTOR_KEY_MAX
];
1709 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1710 FLOW_DISSECTOR_KEY_META
, meta
);
1711 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_CONTROL
, control
);
1712 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_BASIC
, basic
);
1713 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1714 FLOW_DISSECTOR_KEY_ETH_ADDRS
, eth
);
1715 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1716 FLOW_DISSECTOR_KEY_IPV4_ADDRS
, ipv4
);
1717 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1718 FLOW_DISSECTOR_KEY_IPV6_ADDRS
, ipv6
);
1719 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1720 FLOW_DISSECTOR_KEY_PORTS
, tp
);
1721 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1722 FLOW_DISSECTOR_KEY_PORTS_RANGE
, tp_range
);
1723 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1724 FLOW_DISSECTOR_KEY_IP
, ip
);
1725 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1726 FLOW_DISSECTOR_KEY_TCP
, tcp
);
1727 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1728 FLOW_DISSECTOR_KEY_ICMP
, icmp
);
1729 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1730 FLOW_DISSECTOR_KEY_ARP
, arp
);
1731 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1732 FLOW_DISSECTOR_KEY_MPLS
, mpls
);
1733 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1734 FLOW_DISSECTOR_KEY_VLAN
, vlan
);
1735 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1736 FLOW_DISSECTOR_KEY_CVLAN
, cvlan
);
1737 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1738 FLOW_DISSECTOR_KEY_ENC_KEYID
, enc_key_id
);
1739 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1740 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
, enc_ipv4
);
1741 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1742 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
, enc_ipv6
);
1743 if (FL_KEY_IS_MASKED(mask
, enc_ipv4
) ||
1744 FL_KEY_IS_MASKED(mask
, enc_ipv6
))
1745 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_ENC_CONTROL
,
1747 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1748 FLOW_DISSECTOR_KEY_ENC_PORTS
, enc_tp
);
1749 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1750 FLOW_DISSECTOR_KEY_ENC_IP
, enc_ip
);
1751 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1752 FLOW_DISSECTOR_KEY_ENC_OPTS
, enc_opts
);
1753 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1754 FLOW_DISSECTOR_KEY_CT
, ct
);
1755 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1756 FLOW_DISSECTOR_KEY_HASH
, hash
);
1758 skb_flow_dissector_init(dissector
, keys
, cnt
);
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_range.tp_min.dst &&
	     newmask->key.tp_range.tp_max.dst) ||
	    (newmask->key.tp_range.tp_min.src &&
	     newmask->key.tp_range.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}
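
/* Create or update a filter. The classifier runs unlocked
 * (TCF_PROTO_OPS_DOIT_UNLOCKED), so every step that touches shared
 * state either takes tp->lock or relies on refcounts; concurrent
 * modifications surface as -EAGAIN and are retried by cls_api.
 */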
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}
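
/* Deletion removes the filter's idr and hashtable entries under
 * tp->lock via __fl_delete(); *last reports whether the whole tp is
 * now empty so cls_api can tear it down.
 */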
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	unsigned long id = arg->cookie, tmp;
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
		/* don't return filters that are being deleted */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->count++;
	}
	arg->cookie = id;
}
static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}
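
/* Replay all offloaded filters to a newly attached (or detaching)
 * block callback, rebuilding the flow_cls_offload descriptor for each
 * filter much as the regular offload path does.
 */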
static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
		tc_cleanup_flow_action(&cls_flower.rule->action);
		kfree(cls_flower.rule);

		if (err) {
			__fl_put(f);
			return err;
		}
next_flow:
		__fl_put(f);
	}

	return 0;
}
static void fl_hw_add(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
}
static void fl_hw_del(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;

	spin_lock(&tp->lock);
	if (!list_empty(&f->hw_list))
		list_del_init(&f->hw_list);
	spin_unlock(&tp->lock);
}
static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
	kfree(cls_flower.rule);

	return 0;
}
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
}
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}
static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}
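
/* Dump helpers. fl_dump_key_val() is the workhorse: it emits nothing
 * when the mask is all-zero, and omits the mask attribute when
 * mask_type is TCA_FLOWER_UNSPEC (exact-match-only attributes).
 */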
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
			    TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
			    TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
			    TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
			    TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.src)))
		return -1;

	return 0;
}
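
/* MPLS dumping comes in two flavours: legacy flat attributes that can
 * describe only the first label-stack entry, and nested
 * TCA_FLOWER_KEY_MPLS_OPTS_LSE attributes (one per LSE, each carrying
 * its 1-based depth) for deeper stacks. The helpers below handle the
 * nested form.
 */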
static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
				    struct flow_dissector_key_mpls *mpls_key,
				    struct flow_dissector_key_mpls *mpls_mask,
				    u8 lse_index)
{
	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
	int err;

	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
			 lse_index + 1);
	if (err)
		return err;

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}

	return 0;
}
static int fl_dump_key_mpls_opts(struct sk_buff *skb,
				 struct flow_dissector_key_mpls *mpls_key,
				 struct flow_dissector_key_mpls *mpls_mask)
{
	struct nlattr *opts;
	struct nlattr *lse;
	u8 lse_index;
	int err;

	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
	if (!opts)
		return -EMSGSIZE;

	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
		if (!(mpls_mask->used_lses & 1 << lse_index))
			continue;

		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
		if (!lse) {
			err = -EMSGSIZE;
			goto err_opts;
		}

		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
					       lse_index);
		if (err)
			goto err_opts_lse;
		nla_nest_end(skb, lse);
	}
	nla_nest_end(skb, opts);

	return 0;

err_opts_lse:
	nla_nest_cancel(skb, lse);
err_opts:
	nla_nest_cancel(skb, opts);

	return err;
}
static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_key;
	int err;

	if (!mpls_mask->used_lses)
		return 0;

	lse_mask = &mpls_mask->ls[0];
	lse_key = &mpls_key->ls[0];

	/* For backward compatibility, don't use the MPLS nested attributes if
	 * the rule can be expressed using the old attributes.
	 */
	if (mpls_mask->used_lses & ~1 ||
	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}
static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}
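
/* Example: fl_get_key_flag(flags_key, flags_mask, &key, &mask,
 * TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT) copies the
 * "is fragment" bit from the dissector encoding into the flower
 * netlink encoding, but only when that bit is set in the mask.
 */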
static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}
static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
				 struct flow_dissector_key_enc_opts *enc_opts)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct vxlan_metadata *)&enc_opts->data[0];
	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int fl_dump_key_erspan_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct erspan_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto nla_put_failure;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto nla_put_failure;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_VXLAN_OPT:
		err = fl_dump_key_vxlan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_ERSPAN_OPT:
		err = fl_dump_key_erspan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}
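
/* Dump the full key/mask pair. The attribute order roughly mirrors the
 * parsing side: L2 headers first, then VLAN/MPLS, L3, L4, and finally
 * tunnel (enc_*), conntrack, flags and hash metadata.
 */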
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	    fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
			    &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
			    sizeof(key->hash.hash)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
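
/* Netlink dump of a single filter. classid, key/mask and flags are
 * read under tp->lock so they stay consistent with concurrent
 * fl_change() updates; hardware stats are fetched afterwards, outside
 * the lock.
 */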
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	skip_hw = tc_skip_hw(f->flags);

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (tcf_exts_terse_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}
static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.terse_dump	= fl_terse_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};
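
/* Illustrative userspace usage (iproute2, not part of this file): a
 * flower filter matching TCP traffic to port 80 could be installed
 * with something like
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * which reaches fl_change() above via cls_api.
 */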
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");