/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

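/*
 * Illustrative user-space invocation (a sketch only; exact syntax
 * depends on the iproute2 version, and the opcode string below is a
 * hypothetical classic BPF program matching IPv4 frames):
 *
 *   tc filter add dev em1 parent 1: bpf bytecode \
 *      "4,40 0 0 12,21 0 1 2048,6 0 0 65535,6 0 0 0" flowid 1:1
 *
 * See cls_bpf_classify() below for how the filter's return value is
 * mapped onto a classid.
 */
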
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

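/*
 * Per-classifier state: cls_bpf_head anchors the list of attached
 * programs and carries the handle generator. Each cls_bpf_prog holds
 * one classic BPF program together with its netlink-visible metadata
 * (handle, opcode copy, classid result and extensions).
 */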
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
};

struct cls_bpf_prog {
	struct sk_filter *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

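/*
 * Classification path: run each attached BPF program over the skb in
 * list order. A return value of 0 means "no match, try the next
 * program"; -1 selects the program's configured classid; any other
 * non-zero value is used as the classid directly.
 */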
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry(prog, &head->plist, link) {
		int filter_res = SK_RUN_FILTER(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

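/* Called when a new tc filter instance of kind "bpf" is created. */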
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD(&head->plist);
	tp->root = head;

	return 0;
}

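/*
 * Release one program: unbind it from its class, drop its extensions,
 * destroy the (possibly JIT-ed) filter and free the opcode copy.
 */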
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_unbind_filter(tp, &prog->res);
	tcf_exts_destroy(tp, &prog->exts);

	sk_unattached_filter_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

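/* Delete a single program, identified by the opaque cookie in arg. */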
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			list_del(&prog->link);
			cls_bpf_delete_prog(tp, prog);
			return 0;
		}
	}

	return -ENOENT;
}

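/* Tear down the whole classifier instance, releasing every program. */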
static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del(&prog->link);
		cls_bpf_delete_prog(tp, prog);
	}

	kfree(head);
}

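/* Look up a program by handle; returns it as an opaque cookie, 0 if absent. */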
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

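/* Programs are not reference counted, so put is a no-op. */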
static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

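/*
 * Validate the netlink attributes, copy the opcodes out of the
 * message, build a new unattached socket filter from them and swap it
 * into prog, freeing the previous filter and opcode copy afterwards.
 */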
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est)
{
	struct sock_filter *bpf_ops, *bpf_old;
	struct tcf_exts exts;
	struct sock_fprog tmp;
	struct sk_filter *fp, *fp_old;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = (struct sock_filter __user *) bpf_ops;

	ret = sk_unattached_filter_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	tcf_tree_lock(tp);
	fp_old = prog->filter;
	bpf_old = prog->bpf_ops;

	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;
	tcf_tree_unlock(tp);

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	if (fp_old)
		sk_unattached_filter_destroy(fp_old);
	if (bpf_old)
		kfree(bpf_old);

	return 0;

errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(tp, &exts);
	return ret;
}

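/*
 * Pick an unused handle for a newly created program. The generator
 * wraps within [1, 0x7FFFFFFE] and up to 0x80000000 candidates are
 * probed; 0 is returned when no free handle could be found.
 */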
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

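/*
 * Entry point for filter add/replace requests from user space: parse
 * the nested TCA_OPTIONS attributes, then either modify the existing
 * program in place or allocate and link a fresh one.
 */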
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	if (prog != NULL) {
		if (handle && prog->handle != handle)
			return -EINVAL;
		return cls_bpf_modify_existing(net, tp, prog, base, tb,
					       tca[TCA_RATE]);
	}

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (prog == NULL)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
	if (ret < 0)
		goto errout;

	tcf_tree_lock(tp);
	list_add(&prog->link, &head->plist);
	tcf_tree_unlock(tp);

	*arg = (unsigned long) prog;

	return 0;
errout:
	if (*arg == 0UL && prog)
		kfree(prog);

	return ret;
}

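/*
 * Dump one program back to user space as nested netlink attributes:
 * classid, opcode count and the raw opcode array, plus any attached
 * extensions and their statistics.
 */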
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

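/* Iterate over all programs, honouring the walker's skip/count state. */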
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

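/* Hook the classifier into the tc framework under the kind name "bpf". */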
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.put		=	cls_bpf_put,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);