/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

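/*
 * Three structures make up the classifier: tc_u_knode is one filter (a
 * "key node"), tc_u_hnode is a hash table of key nodes, and tc_u_common
 * is the state shared by all u32 instances attached to one qdisc.
 *
 * Every object is named by a 32bit handle:
 *   bits 20..31  hash table id (TC_U32_HTID)
 *   bits 12..19  bucket within that table (TC_U32_HASH)
 *   bits  0..11  key node id within the bucket (TC_U32_NODE)
 * A handle whose low 20 bits (TC_U32_KEY) are zero names the hash table
 * itself.
 */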
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;

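/*
 * Fold the masked key word into a bucket index: fshift is precomputed in
 * u32_change() so that the lowest set bit of the (host order) hash mask
 * ends up at bit zero of the result.
 */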
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = ntohl(key & sel->hmask)>>fshift;

	return h;
}

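/*
 * Classify one packet by walking the hash table hierarchy.  In each
 * table the bucket is chosen by hashing a selected packet word; every
 * key node in the bucket compares its val/mask pairs against 32bit
 * words at the configured offsets.  A matching node either terminates
 * with a result (TC_U32_TERMINAL) or links to a deeper table, in which
 * case the node and parse pointer are pushed on an explicit stack so
 * the remaining alternatives can be retried on the way back up.  The
 * TC_U32_OFFSET, TC_U32_VAROFFSET and TC_U32_EAT flags move the parse
 * pointer across variable length headers; TC_U32_MAXDEPTH bounds the
 * descent to catch misconfigured loops.
 */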
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb_network_header(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt +=1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {

			if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] +=1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit +=1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb_tail_pointer(skb))
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}

static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}

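/*
 * Resolve a handle to its internal object for the tcf core: the root
 * table is addressed as TC_U32_ROOT, and a handle with a zero key part
 * yields the hash table itself rather than a key node.
 */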
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

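/*
 * Allocate an unused hash table id.  Generated ids occupy the upper
 * half of the 12bit id space (0x800 upwards, shifted into the top bits
 * of the handle); up to 0x800 candidates are probed before giving up
 * and returning 0.
 */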
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

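/*
 * Each u32 instance gets its own single-bucket root table, while the
 * tc_u_common is created once per qdisc and then shared, refcounted, by
 * every u32 classifier attached to that qdisc.
 */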
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

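/* Unlink one key node from its bucket under the tree lock and free it. */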
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

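/*
 * Tear down one instance.  Its root table always dies with it; the
 * shared tc_u_common and any remaining tables are freed only when the
 * last instance on the qdisc drops its reference.
 */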
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

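/*
 * Pick a node id for a new key within its bucket: one past the highest
 * id already in use, never below 0x800 and clamped to the 12bit
 * maximum of 0xFFF.
 */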
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

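/* Netlink attribute policy applied when parsing TCA_OPTIONS in u32_change(). */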
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

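/*
 * Apply the attributes shared by create and change: validate the
 * extensions (actions/policing), re-link the node to a downstream hash
 * table with refcount adjustment, and bind the classid and optional
 * input device match.
 */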
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

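/*
 * Create or update a filter.  Three cases: a non-NULL *arg changes an
 * existing key node in place; TCA_U32_DIVISOR creates a new hash table
 * (the attribute carries the bucket count, stored minus one and used
 * as the bucket mask); otherwise a new key node is built from
 * TCA_U32_SEL and linked into its bucket in node-id order, with wmb()
 * ordering the stores against concurrent classifiers.
 */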
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}

#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

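/*
 * Iterate over every table and key node of this instance, honouring
 * the walker's skip/count bookkeeping.
 */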
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

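/*
 * Dump one object to netlink: a hash table is described by its divisor,
 * a key node by its selector, links, mark and counters.
 */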
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
	} else {
		NLA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
		}
		if (n->res.classid)
			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
		if (n->ht_down)
			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if(strlen(n->indev))
			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		NLA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");