/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
        (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
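/* cls_bpf_head anchors the RCU-protected list of programs attached to one
 * classifier instance; each cls_bpf_prog describes a single filter, backed
 * either by a classic BPF program (bpf_ops) or by an eBPF program that was
 * loaded beforehand and passed in by file descriptor.
 */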
struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        union {
                struct work_struct work;
                struct rcu_head rcu;
        };
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FLAGS_GEN]     = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_TRAP:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}
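/* The classify path runs each attached program on the skb. At ingress the
 * MAC header is pushed back before the run so the program sees the same
 * packet layout as at egress. With integrated actions
 * (TCA_BPF_FLAG_ACT_DIRECT) the program's return code is taken directly as
 * the TC action verdict.
 */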
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (tc_skip_sw(prog->gen_flags)) {
                        filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
                } else if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}
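/* Issue a single offload command to the device through
 * ndo_setup_tc(TC_SETUP_CLSBPF); on a successful add or replace the filter
 * is marked as resident in hardware.
 */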
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               enum tc_clsbpf_command cmd)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_bpf_offload cls_bpf = {};
        int err;

        tc_cls_common_offload_init(&cls_bpf.common, tp);
        cls_bpf.command = cmd;
        cls_bpf.exts = &prog->exts;
        cls_bpf.prog = prog->filter;
        cls_bpf.name = prog->bpf_name;
        cls_bpf.exts_integrated = prog->exts_integrated;
        cls_bpf.gen_flags = prog->gen_flags;

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
        if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
                prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

        return err;
}
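/* Work out whether the hardware state needs an add, replace or destroy,
 * honouring the skip_hw/skip_sw generic flags. An offload failure is only
 * fatal when the software path was explicitly skipped.
 */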
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct cls_bpf_prog *obj = prog;
        enum tc_clsbpf_command cmd;
        bool skip_sw;
        int ret;

        skip_sw = tc_skip_sw(prog->gen_flags) ||
                  (oldprog && tc_skip_sw(oldprog->gen_flags));

        if (oldprog && oldprog->offloaded) {
                if (tc_should_offload(dev, prog->gen_flags)) {
                        cmd = TC_CLSBPF_REPLACE;
                } else if (!tc_skip_sw(prog->gen_flags)) {
                        obj = oldprog;
                        cmd = TC_CLSBPF_DESTROY;
                } else {
                        return -EINVAL;
                }
        } else {
                if (!tc_should_offload(dev, prog->gen_flags))
                        return skip_sw ? -EINVAL : 0;
                cmd = TC_CLSBPF_ADD;
        }

        ret = cls_bpf_offload_cmd(tp, obj, cmd);
        if (ret)
                return skip_sw ? ret : 0;

        obj->offloaded = true;
        if (oldprog)
                oldprog->offloaded = false;

        return 0;
}
static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog)
{
        int err;

        if (!prog->offloaded)
                return;

        err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
        if (err) {
                pr_err("Stopping hardware offload failed: %d\n", err);
                return;
        }

        prog->offloaded = false;
}
static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
{
        if (!prog->offloaded)
                return;

        cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}
static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}
static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
}
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);
        tcf_exts_put_net(&prog->exts);

        cls_bpf_free_parms(prog);
        kfree(prog);
}
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
        struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

        rtnl_lock();
        __cls_bpf_delete_prog(prog);
        rtnl_unlock();
}
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
        tcf_queue_work(&prog->work);
}
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        cls_bpf_stop_offload(tp, prog);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        if (tcf_exts_get_net(&prog->exts))
                call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
        else
                __cls_bpf_delete_prog(prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        __cls_bpf_delete(tp, arg);
        *last = list_empty(&head->plist);
        return 0;
}
static void cls_bpf_destroy(struct tcf_proto *tp)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link)
                __cls_bpf_delete(tp, prog);

        kfree_rcu(head, rcu);
}
static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle)
                        return prog;
        }

        return NULL;
}
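/* Classic BPF case: the filter is passed as an array of sock_filter
 * instructions (TCA_BPF_OPS/TCA_BPF_OPS_LEN) and converted into an internal
 * BPF program via bpf_prog_create().
 */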
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}
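/* eBPF case: the program was loaded beforehand through the bpf(2) syscall
 * and is referenced here by file descriptor (TCA_BPF_FD); an optional
 * TCA_BPF_NAME is only kept for dumping.
 */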
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
                netif_keep_dst(qdisc_dev(tp->q));

        return 0;
}
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
                             struct cls_bpf_prog *prog, unsigned long base,
                             struct nlattr **tb, struct nlattr *est, bool ovr)
{
        bool is_bpf, is_ebpf, have_exts = false;
        u32 gen_flags = 0;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
                        return -EINVAL;

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }
        if (tb[TCA_BPF_FLAGS_GEN]) {
                gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
                if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
                    !tc_flags_valid(gen_flags))
                        return -EINVAL;
        }

        prog->exts_integrated = have_exts;
        prog->gen_flags = gen_flags;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        return 0;
}
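/* Handle allocation: walk the 31-bit handle space starting from the last
 * generated value and return 0 if no free handle can be found.
 */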
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}
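/* Create or replace a filter: parse and validate the netlink attributes,
 * set up the program and its handle, attempt hardware offload and finally
 * publish the new program on the RCU list, replacing the old one if needed.
 */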
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          void **arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
                               NULL);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        ret = cls_bpf_offload(tp, prog, oldprog);
        if (ret)
                goto errout_parms;

        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                tcf_exts_get_net(&oldprog->exts);
                call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = prog;
        return 0;

errout_parms:
        cls_bpf_free_parms(prog);
errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}
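/* Dump helpers: classic BPF filters are dumped as their raw instructions,
 * eBPF filters by name, id and tag so user space can match them against the
 * loaded programs.
 */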
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        cls_bpf_offload_update_stats(tp, prog);

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;
        if (prog->gen_flags &&
            nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
        struct cls_bpf_prog *prog = fh;

        if (prog && prog->res.classid == classid)
                prog->res.class = cl;
}
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}
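/* Register the classifier under the "bpf" kind so it can be attached from
 * user space, e.g. via tc filter ... bpf.
 */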
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           = "bpf",
        .owner          = THIS_MODULE,
        .classify       = cls_bpf_classify,
        .init           = cls_bpf_init,
        .destroy        = cls_bpf_destroy,
        .get            = cls_bpf_get,
        .change         = cls_bpf_change,
        .delete         = cls_bpf_delete,
        .walk           = cls_bpf_walk,
        .dump           = cls_bpf_dump,
        .bind_class     = cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);