/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/flow_dissector.h>
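/*
 * Illustrative userspace usage (iproute2 flower syntax; exact option
 * names and availability depend on the iproute2 version):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip flower \
 *		ip_proto tcp dst_ip 192.168.1.1 action drop
 */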
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
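/* All filters under one classifier instance share a single mask (see
 * fl_check_assign_mask()); a packet's dissected key is ANDed with that
 * mask and the result is used directly as the rhashtable lookup key.
 */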
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};
struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};
struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
};
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
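/* Compute the smallest long-aligned byte window of the key that covers
 * every non-zero mask byte; lookups and masked copies only ever touch
 * this window, so sparse masks stay cheap.
 */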
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
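/* Fast path: dissect the skb into a flow key, AND it with the shared
 * mask, and look the masked key up in the rhashtable. Runs under RCU
 * read-side protection in softirq context.
 */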
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}
static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
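/* Hardware offload hooks. Each builds a struct tc_cls_flower_offload
 * command and passes it to the device through ndo_setup_tc(). When a
 * filter is skip_sw, a failed REPLACE is fatal since there is no
 * software fallback to match the packet.
 */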
static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = cookie;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct fl_flow_key *key,
				struct tcf_exts *actions,
				unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    &tc);

	if (tc_skip_sw(flags))
		return err;

	return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}
static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};
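/* Note: a value attribute supplied without its mask attribute defaults
 * below to an all-ones mask, i.e. an exact match on that field.
 */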
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}
static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
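/* FL_KEY_IN_RANGE() below is an overlap test between a fl_flow_key
 * member and the long-aligned window computed by fl_mask_update_range();
 * only dissector keys that can actually influence a match get registered.
 */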
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member) \
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)						\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&			\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IN_RANGE(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while (0)
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
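/* Only one mask per classifier instance is supported: the first filter
 * pins it and sizes the hash table and dissector accordingly; every
 * later filter must supply an identical mask.
 */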
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
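/* Handles are drawn from a wrapping 31-bit counter; fl_get() is used to
 * step over values that are already taken.
 */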
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	err = fl_hw_replace_filter(tp,
				   &head->dissector,
				   &mask.key,
				   &fnew->key,
				   &fnew->exts,
				   (unsigned long)fnew,
				   fnew->flags);
	if (err)
		goto errout;

	if (fold) {
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}
static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
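/* Dump helper: fields whose mask is all zeroes were never set by the
 * user and are omitted from the netlink dump entirely.
 */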
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");