// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */
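/*
 * Illustrative usage from userspace (a sketch; exact syntax depends on
 * the iproute2 version, and "prog.o"/"classifier" are placeholder names):
 *
 *	tc qdisc add dev em1 clsact
 *	tc filter add dev em1 ingress bpf da obj prog.o sec classifier
 */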
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
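/* Per-tcf_proto state: the RCU-protected list of attached programs and
 * the IDR used to allocate and look up filter handles. Each attached
 * program, classic or eBPF, is a cls_bpf_prog.
 */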
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
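/* For filters with integrated actions (TCA_BPF_FLAG_ACT_DIRECT), the
 * program's return value is a TC action opcode. Anything unrecognized
 * maps to TC_ACT_UNSPEC so classification continues with the next filter.
 */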
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
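/* Main classification path, run per packet under RCU. Each attached
 * program is executed on the skb; at ingress the MAC header is pushed
 * back first, since tc BPF programs expect the packet to start at L2.
 */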
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}
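/* Classic BPF filters keep a copy of their sock_filter ops around for
 * dumping; eBPF filters loaded by fd leave bpf_ops NULL, which is what
 * distinguishes the two flavours everywhere else.
 */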
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}
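/* Issue a hardware offload request through the block callbacks. Depending
 * on which of prog/oldprog is set this is a replace, an add, or a destroy.
 * If an add/replace fails, the operation is unwound by calling ourselves
 * recursively with the program arguments swapped.
 */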
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}
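/* Normalize and validate offload intent before issuing the command:
 * a replace may not change the skip_hw/skip_sw flags, and skip_hw
 * filters never reach the hardware path at all.
 */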
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}
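/* Ask the offloading driver to sync its counters into the filter's
 * actions before a dump; the callback's return value is deliberately
 * ignored since stats are best-effort.
 */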
static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}
static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}
static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}
static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
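/* Attach a classic BPF program supplied inline as an array of sock_filter
 * instructions (TCA_BPF_OPS/TCA_BPF_OPS_LEN). The ops are duplicated so
 * they can be dumped back to userspace later.
 */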
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
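/* Attach an eBPF program by file descriptor (TCA_BPF_FD). The program
 * must be of type BPF_PROG_TYPE_SCHED_CLS; for skip_sw filters it must
 * additionally be bound to the offloading device.
 */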
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}
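/* Parse and apply the netlink parameters common to both program flavours.
 * Exactly one of the classic (ops) or extended (fd) forms must be given.
 */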
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
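/* Create a new filter or replace an existing one. A fresh handle is
 * allocated from the IDR unless we are replacing, in which case the old
 * program is swapped out under RCU and freed via the workqueue.
 */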
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}
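/* The callbacks the TC core needs; "bpf" is the kind string selected by
 * "tc filter ... bpf" from userspace.
 */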
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};
static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);