/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/flow_dissector.h>

#include <net/dst_metadata.h>
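
/* All fields a filter can match on, laid out as one flat structure so that
 * masked comparisons can be done over the key as an array of longs.
 */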
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
};
struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
	struct idr handle_idr;
};
struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
	struct net_device *hw_dev;
};
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
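
/* Find the first and last byte actually set in the mask and remember that
 * range, rounded out to long boundaries, so later copies and compares only
 * touch the bytes that matter.
 */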
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&head->ht,
				      fl_key_get_start(mkey, &head->mask),
				      head->ht_params);
}
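
/* Fast path: build a key from the dissected skb (plus any tunnel metadata),
 * mask it with the single mask of this instance and look the result up in
 * the hash table.
 */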
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return 0;
}
static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);

	rtnl_lock();
	__fl_destroy_filter(f);
	rtnl_unlock();
}
static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	INIT_WORK(&f->work, fl_destroy_filter_work);
	tcf_queue_work(&f->work);
}
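
/* Hardware offload helpers: these build a tc_cls_flower_offload command
 * (DESTROY/REPLACE/STATS) and hand it to the driver via ndo_setup_tc().
 */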
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct net_device *dev = f->hw_dev;

	if (!tc_can_offload(dev))
		return;

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload cls_flower = {};
	int err;

	if (!tc_can_offload(dev)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev))) {
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		cls_flower.egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.dissector = dissector;
	cls_flower.mask = mask;
	cls_flower.key = &f->mkey;
	cls_flower.exts = &f->exts;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
					    &cls_flower);
	if (!err)
		f->flags |= TCA_CLS_FLAGS_IN_HW;

	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct net_device *dev = f->hw_dev;

	if (!tc_can_offload(dev))
		return;

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.exts = &f->exts;
	cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
				      &cls_flower);
}
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	if (tcf_exts_get_net(&f->exts))
		call_rcu(&f->rcu, fl_destroy_filter);
	else
		__fl_destroy_filter(f);
}
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
static void fl_destroy(struct tcf_proto *tp)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
}
static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	return idr_find_ext(&head->handle_idr, handle);
}
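
/* Netlink policy describing the expected type/length of every
 * TCA_FLOWER_* attribute accepted from userspace.
 */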
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
};
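
/* Copy a value and its mask from netlink attributes. When no mask attribute
 * is given the mask is set to all ones, i.e. an exact match on the value.
 */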
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}
static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}
static void fl_set_key_ip(struct nlattr **tb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
		       &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
		       sizeof(key->tos));

	fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
		       &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
		       sizeof(key->ttl));
}
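
/* Parse the TCA_FLOWER_* attributes into the flow key and its mask,
 * dispatching to the per-protocol helpers above.
 */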
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}
static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);
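
/* Tell the flow dissector which keys to extract: control and basic are
 * always needed, everything else only if the mask actually covers it.
 */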
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
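
/* A classifier instance uses a single mask for all of its filters; the
 * first filter installs it, later ones must use an identical mask.
 */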
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * for lookups.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	return 0;
}
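
/* Create or replace a filter: parse attributes, allocate a handle, build
 * the key/mask, insert into the software hash table and, unless skipped,
 * program the hardware.
 */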
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct nlattr **tb;
	struct fl_flow_mask mask = {};
	unsigned long idr_index;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return -ENOBUFS;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    1, 0x80000000, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	/* user specifies a handle and it doesn't exist */
	if (handle && !fold) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    handle, handle + 1, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout_idr;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout_idr;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout_idr;

	if (!tc_skip_sw(fnew->flags)) {
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout_idr;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout_idr;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout_idr;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = fnew;

	if (fold) {
		fnew->handle = handle;
		idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	kfree(tb);
	return 0;

errout_idr:
	if (fnew->handle)
		idr_remove_ext(&head->handle_idr, fnew->handle);
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
	return err;
}
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	*last = list_empty(&head->filters);
	return 0;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump_key_ip(struct sk_buff *skb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
			    TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
			    TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
		return -1;

	return 0;
}
static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}
static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}
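
/* Dump a filter back to userspace, emitting only the fields whose mask is
 * non-zero.
 */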
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.owner		= THIS_MODULE,
};
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");