/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
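/* A u32 filter handle packs three fields (see TC_U32_HTID(), TC_U32_HASH()
 * and TC_U32_NODE() in include/uapi/linux/pkt_cls.h): the hash table id in
 * the top 12 bits, the bucket in the next 8 bits and the key node id in the
 * low 12 bits. As an illustrative sketch of how the linked hash tables below
 * are driven from user space (the exact command syntax is a property of
 * iproute2, not of this module):
 *
 *	tc filter add dev eth0 parent 1: prio 1 handle 1: u32 divisor 256
 *	tc filter add dev eth0 parent 1: prio 1 u32 \
 *		match ip dst 10.0.0.0/8 link 1: hashkey mask 0x000000ff at 16
 *	tc filter add dev eth0 parent 1: prio 1 u32 ht 1: \
 *		match ip protocol 6 0xff classid 1:10
 */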
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
struct tc_u_knode {
	struct tc_u_knode __rcu *next;
	u32 handle;
	struct tc_u_hnode __rcu *ht_up;
	struct tcf_exts exts;
#ifdef CONFIG_NET_CLS_IND
	int ifindex;
#endif
	u8 fshift;
	struct tcf_result res;
	struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32 flags;
	unsigned int in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32 val;
	u32 mask;
	u32 __percpu *pcpu_success;
#endif
	struct tcf_proto *tp;
	struct rcu_work rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel sel;
};
struct tc_u_hnode {
	struct tc_u_hnode __rcu *next;
	u32 handle;
	u32 prio;
	struct tc_u_common *tp_c;
	int refcnt;
	unsigned int divisor;
	struct idr handle_idr;
	struct rcu_head rcu;
	u32 flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu *ht[1];
};
struct tc_u_common {
	struct tc_u_hnode __rcu *hlist;
	void *ptr;
	int refcnt;
	struct idr handle_idr;
	struct hlist_node hnode;
	struct rcu_head rcu;
};
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
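/* The classify walk below matches each 32bit key of the current node against
 * the packet, then either terminates (TC_U32_TERMINAL), follows ht_down into
 * the next-level hash table, or advances to the next node in the bucket. An
 * explicit stack bounds descent into linked tables at TC_U32_MAXDEPTH, and
 * the off/off2 bookkeeping implements the TC_U32_OFFSET, TC_U32_VAROFFSET
 * and TC_U32_EAT selector flags that let keys be matched relative to
 * variable-length headers.
 */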
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {
				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}
static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}
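/* Hash table ids live in the top 12 bits of a handle. The IDR hands out
 * 11-bit ids (1..0x7FF); OR-ing in 0x800 and shifting by 20 keeps generated
 * htids in the upper half of the handle space, e.g. id 1 maps to htid
 * 0x80100000.
 */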
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}
static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}
static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
}
static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->ptr == tc_u_common_ptr(tp))
			return tc;
	}
	return NULL;
}
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = tc_u_common_ptr(tp);
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}
/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode()
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
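/* Hardware offload: hash table and key node operations are mirrored to the
 * block's offload callbacks as TC_CLSU32_* commands. When the user asked for
 * skip_sw, a filter that no driver accepted is an error; otherwise hardware
 * is best effort and the software datapath remains authoritative.
 */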
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}
static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
	tcf_block_offload_dec(block, &n->flags);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	} else if (err > 0) {
		n->in_hw_count = err;
		tcf_block_offload_inc(block, &n->flags);
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}
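/* tcf_proto teardown: drop this proto's reference on the root hash table
 * (destroying it once only the hash list still holds it) and, when the
 * shared tc_u_common loses its last user, clear and free every remaining
 * hash table.
 */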
static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 2) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 2) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}
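/* Pick a free node id inside one hash table: try the 0x800..0xFFF range
 * first (matching the convention that autogenerated ids start at 0x800),
 * then fall back to 1..0xFFF, and finally to the maximum id.
 */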
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID] = { .type = NLA_U32 },
	[TCA_U32_HASH] = { .type = NLA_U32 },
	[TCA_U32_LINK] = { .type = NLA_U32 },
	[TCA_U32_DIVISOR] = { .type = NLA_U32 },
	[TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS] = { .type = NLA_U32 },
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}
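/* u32_change() serves three distinct requests: replacing an existing key
 * node (done RCU-style via a copy from u32_init_knode()), creating a new
 * hash table when TCA_U32_DIVISOR is present, and inserting a new key node
 * into an existing table, kept sorted by TC_U32_NODE() id.
 */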
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	size_t sel_size;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}
	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr, extack);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}
	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);
	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif
	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}
errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err) {
		if (add && tc_skip_sw(n->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);

	return 0;
}
static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}
static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &n->res, base);
		else
			__tcf_unbind_filter(q, &n->res);
	}
}
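/* Dump one hash table or key node back to user space. Per-cpu hit and mark
 * counters are summed over all possible cpus here, so the netlink reply
 * carries a single consistent set of statistics.
 */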
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind = "u32",
	.classify = u32_classify,
	.init = u32_init,
	.destroy = u32_destroy,
	.get = u32_get,
	.change = u32_change,
	.delete = u32_delete,
	.walk = u32_walk,
	.reoffload = u32_reoffload,
	.dump = u32_dump,
	.bind_class = u32_bind_class,
	.owner = THIS_MODULE,
};
static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}
module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");