/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
};
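
/* Note on key vs. mkey: 'key' holds the values supplied by the user, while
 * 'mkey' is the same key with the classifier-wide mask already applied (see
 * fl_set_masked_key() below). Only 'mkey' is hashed into the rhashtable, so
 * a lookup with a masked skb key reduces to a plain exact-match comparison.
 */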

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
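
/* For example, with sizeof(long) == 8, a mask whose first non-zero byte is
 * at offset 10 and whose last non-zero byte is at offset 18 yields
 * range.start = rounddown(10, 8) = 8 and range.end = roundup(19, 8) = 24,
 * i.e. two longs to compare. Rounding the range out to long boundaries is
 * what lets fl_set_masked_key() below walk the key in whole longs without
 * ever touching a partial word.
 */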

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
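
/* fl_set_masked_key() is used on both sides of the lookup: fl_set_parms()
 * applies the mask to a new filter's key before the filter is inserted into
 * the hashtable, and fl_classify() applies the same mask to the dissected
 * skb key before the lookup, so both sides hash and compare exactly the
 * same masked bytes.
 */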

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = cookie;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct fl_flow_key *key,
				struct tcf_exts *actions,
				unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    &tc);

	if (tc_skip_sw(flags))
		return err;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);

	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
};
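
/* The attributes above are filled in by userspace tc(8). As a rough,
 * illustrative example (device name and numbers are placeholders), a rule
 * matching IPv4 TCP traffic to port 80 and dropping it could be installed
 * with something like:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * which arrives here as TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO
 * and TCA_FLOWER_KEY_TCP_DST attributes handled by fl_set_key() below.
 */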

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
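
/* After fl_init_hashtable(), the rhashtable hashes key_len bytes starting at
 * offsetof(struct cls_fl_filter, mkey) + mask->range.start, i.e. only the
 * long-aligned window of the masked key that the mask actually covers. The
 * lookup side in fl_classify() points at the same window via
 * fl_key_get_start(&skb_mkey, &head->mask).
 */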

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
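
/* fl_init_dissector() only registers the key parts whose mask contains at
 * least one non-zero byte (plus the always-needed control and basic keys),
 * so per-packet dissection effort scales with what the installed filters
 * actually match on.
 */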

static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
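
/* Handle allocation note: hgen is a simple bumped counter that is reset to 1
 * when it reaches 0x7FFFFFFF; fl_get() is used to probe for collisions, and
 * after 0x80000000 failed attempts the allocation gives up, returns 0, and
 * fl_change() below reports -EINVAL to userspace.
 */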

static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	err = fl_hw_replace_filter(tp,
				   &head->dissector,
				   &mask.key,
				   &fnew->key,
				   &fnew->exts,
				   (unsigned long)fnew,
				   fnew->flags);
	if (err)
		goto errout;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}
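
/* Replace ordering above matters: the new filter is inserted into the
 * software hashtable and offloaded to hardware before the old one (if any)
 * is unhashed, so concurrent fl_classify() callers always find either the
 * old or the new filter; the old filter itself is only freed after a grace
 * period via call_rcu().
 */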

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)))
		goto nla_put_failure;

	nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");