/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

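/* Netlink attribute policy for TCA_BPF_*: a classic BPF program arrives
 * as a raw array of sock_filter ops, an eBPF program as a file
 * descriptor plus an optional NUL-terminated name.
 */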
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

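/* Map the verdict of a direct-action (exts_integrated) program onto the
 * TC action codes the stack understands; anything unknown falls back to
 * TC_ACT_UNSPEC so the next filter in the chain gets a chance.
 */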
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

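/* Fast path: run each attached program over the skb in list order until
 * one returns a verdict. At ingress the MAC header has already been
 * pulled, so it is pushed back temporarily to give the program a
 * consistent view of the packet.
 */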
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

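/* Classic BPF programs are converted from sock_filter ops and keep
 * bpf_ops around for dumping; eBPF programs loaded by fd do not.
 */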
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

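/* Decide whether the new program should be added to, replace, or evict
 * the offloaded state on the device, honouring the skip_hw/skip_sw
 * flags of both the new and the old program.
 */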
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

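/* Final destruction must run with the rtnl lock held, so once the RCU
 * grace period has elapsed the actual free is deferred to a workqueue
 * rather than being done in softirq context.
 */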
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

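/* Load a classic BPF program passed as an array of sock_filter ops via
 * TCA_BPF_OPS; the ops are kept around so the filter can be dumped back
 * to userspace later.
 */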
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

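/* Take a reference on an already loaded eBPF program identified by the
 * file descriptor in TCA_BPF_FD, and remember the user-supplied name
 * for dumping.
 */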
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

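/* Walk the handle space (wrapping below 0x7FFFFFFF) until an unused
 * handle is found; give up after 0x80000000 attempts and return 0,
 * which the caller treats as an error.
 */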
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

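/* Create a new filter or replace an existing one: parse the netlink
 * attributes, set up the program, try to offload it, and only then
 * publish the new program in the RCU-protected list.
 */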
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

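/* Dump helpers: a classic BPF filter re-emits the original sock_filter
 * ops; an eBPF filter emits its name, id and tag so userspace can
 * identify the loaded program.
 */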
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

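/* Re-bind the cached class pointer when the class identified by
 * classid changes under us.
 */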
static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);