// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

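/* Only return codes that are meaningful TC verdicts are passed through;
 * anything else coming back from a direct-action program is mapped to
 * TC_ACT_UNSPEC, which makes the classify loop try the next filter.
 */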
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

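/* Walk the filter list under RCU and run each BPF program on the skb.
 * At ingress the MAC header is temporarily pushed back so programs see
 * the same packet layout as at egress. With integrated (direct-action)
 * exts, the program's return code is the TC verdict; otherwise it picks
 * a classid and the attached actions run via tcf_exts_exec().
 */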
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

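/* Issue a single add/replace/destroy offload request to the drivers bound
 * to this block. If installing the new program fails, the call is replayed
 * with prog and oldprog swapped to restore the previous hardware state.
 */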
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			prog->in_hw_count = err;
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

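/* Decide whether a hardware offload request is needed at all: a replace is
 * only allowed when old and new program carry the same skip_sw/skip_hw
 * flags, and entries marked skip_hw are dropped from the request.
 */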
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

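/* Final teardown runs from a workqueue once the RCU grace period has
 * elapsed; RTNL is taken around it since the exts/action cleanup is
 * serialized with the rest of the TC control path.
 */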
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

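/* Classic BPF path: userspace passed an array of struct sock_filter insns,
 * which is length-checked, duplicated and converted into an internal
 * bpf_prog via bpf_prog_create().
 */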
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

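/* eBPF path: userspace passed the fd of an already-loaded
 * BPF_PROG_TYPE_SCHED_CLS program; take a reference on it and remember
 * its optional name for dumping.
 */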
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

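/* Create or replace a filter: parse the netlink attributes, allocate the
 * new program, reserve (or look up) its handle in the IDR, set up the BPF
 * program and hardware offload, and only then publish it on the RCU list,
 * deferring destruction of any replaced program to a work item.
 */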
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

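/* Dump one filter back to userspace: refresh hardware stats first, then
 * emit the classid, the program description (classic ops, or eBPF
 * id/tag/name) and the flags inside a nested TCA_OPTIONS attribute.
 */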
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

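/* Replay the offload state of every filter towards a single block
 * callback, e.g. when a driver binds to or unbinds from the block while
 * filters are already installed; add selects install vs. removal.
 */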
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
		if (err) {
			if (add && tc_skip_sw(prog->gen_flags))
				return err;
			continue;
		}

		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
					  &prog->gen_flags, add);
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);