/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

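/* Illustrative usage sketch (exact syntax depends on the iproute2
 * version in use, see tc-bpf(8)): filters are attached through the
 * tc(8) frontend, either as classic BPF bytecode passed inline or as
 * an eBPF program loaded beforehand via bpf(2), e.g.:
 *
 *	tc filter add dev eth0 parent ffff: bpf bytecode '...' flowid 1:1
 */
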
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;		/* eBPF: fd the program was loaded from */
		u16 bpf_num_ops;	/* classic BPF: number of instructions */
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

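/* Two mutually exclusive configuration paths are accepted (enforced
 * in cls_bpf_modify_existing() below): classic BPF arrives inline as
 * a TCA_BPF_OPS array of TCA_BPF_OPS_LEN sock_filter instructions,
 * while eBPF arrives as an already-loaded program fd in TCA_BPF_FD,
 * optionally annotated with a TCA_BPF_NAME string.
 */
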
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	struct cls_bpf_prog *prog;
	int ret = -1;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;	/* no match, try the next filter */

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;	/* program picked a classid */

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

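/* Only the classic path keeps a sock_filter array around; eBPF
 * programs are referenced solely through their struct bpf_prog, so a
 * NULL bpf_ops pointer identifies an eBPF filter.
 */
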
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

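/* Deletion unlinks the filter under RTNL while classifier calls may
 * still be walking the list; the actual teardown is deferred to an
 * RCU callback so that in-flight cls_bpf_classify() invocations never
 * see a half-destroyed program.
 */
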
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	prog->res.classid = classid;

	return 0;
}

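/* An eBPF counterpart could look like the following (illustrative
 * sketch only; the section name and build flow are conventions of
 * the clang/LLVM and iproute2 toolchain, not of this module):
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		return -1;	// match, keep the configured classid
 *	}
 *
 * Userspace loads it as BPF_PROG_TYPE_SCHED_CLS via bpf(2) and hands
 * the resulting fd over in TCA_BPF_FD.
 */
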
static int cls_bpf_prog_from_efd(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	prog->res.classid = classid;

	return 0;
}

static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct tcf_exts exts;
	bool is_bpf, is_ebpf;
	u32 classid;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
	    !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
		       cls_bpf_prog_from_efd(tb, prog, classid);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
}

static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

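/* Replacement builds the new program in full before publishing it:
 * list_replace_rcu() swaps it into the list atomically, so readers
 * observe either the old or the new filter, never a mix of both.
 */
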
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);
	return ret;
}

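/* The dump helpers mirror the two load paths: classic filters emit
 * their opcode array, eBPF filters emit the fd and the optional name.
 */
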
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);