/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
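/*
 * Illustrative userspace usage via iproute2's tc (shown only as an
 * example of how this classifier is typically attached; exact syntax
 * depends on the iproute2 version):
 *
 *   # eBPF object, attached at ingress in direct-action mode:
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 *   # classic BPF bytecode with an explicit classid:
 *   tc filter add dev eth0 parent 1: bpf bytecode "..." flowid 1:1
 */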
#define CLS_BPF_NAME_LEN	256
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
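/*
 * Fast path: walk the RCU-protected list of attached programs and run
 * each BPF program on the skb. At ingress the MAC header has already
 * been pulled, so it is pushed back temporarily to give the program a
 * consistent view of the packet. In direct-action mode
 * (exts_integrated) the program's return code is interpreted as a TC
 * action via cls_bpf_exec_opcode(); otherwise it is treated as a
 * classid.
 */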
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}
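/* A filter is eBPF when no classic BPF ops array is attached. */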
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}
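/*
 * Programs are unlinked under RTNL but may still be in use by readers
 * on the classify fast path, so the actual free is deferred through
 * call_rcu() to __cls_bpf_delete_prog().
 */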
static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}
static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}
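/*
 * Load a classic BPF program: the instruction array arrives verbatim in
 * TCA_BPF_OPS (with its length in TCA_BPF_OPS_LEN) and is converted to
 * an internal, possibly JIT-ed program via bpf_prog_create().
 */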
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
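/*
 * Load an eBPF program: userspace passes the file descriptor of an
 * already-verified BPF_PROG_TYPE_SCHED_CLS program in TCA_BPF_FD, plus
 * an optional human-readable name in TCA_BPF_NAME.
 */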
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			tcf_exts_destroy(&exts);
			return -EINVAL;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}

	prog->exts_integrated = have_exts;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
}
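/*
 * Handle allocation: head->hgen is a simple wrapping counter; up to
 * 0x80000000 candidates are probed via cls_bpf_get() until an unused
 * handle is found, giving up with 0 if the space is exhausted.
 */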
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
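/*
 * Create or replace a filter: parse TCA_OPTIONS against bpf_policy, set
 * up the new program, and then publish it by either replacing the old
 * list entry or appending to the list, both in an RCU-safe manner.
 */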
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	kfree(prog);
	return ret;
}
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};
static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);