net/sched/act_bpf.c
/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define ACT_BPF_NAME_LEN	256

/* Configuration of one bpf action instance: either a classic BPF
 * program (bpf_ops/bpf_num_ops) or an eBPF program (bpf_name), plus
 * the resulting struct bpf_prog in either case.
 */
struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
		       struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		/* At ingress the mac header has already been pulled;
		 * push it back so the program sees the full frame,
		 * then restore the original offset.
		 */
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
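/* For illustration only (not part of this file): a minimal standalone
 * eBPF program of the kind this action runs.  Returning TC_ACT_SHOT
 * drops the packet and bumps the per-cpu drop qstats above; returning
 * TC_ACT_UNSPEC (-1) falls back to the default action configured from
 * tc.  The section name is an assumption; it is whatever the loader is
 * told to look for.
 *
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("action"), used))
 *	int drop_all(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_SHOT;
 *	}
 */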
/* Only classic BPF keeps the original sock_filter ops around for
 * dumping, so a NULL bpf_ops identifies an eBPF program.
 */
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
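/* For illustration only: the payload of TCA_ACT_BPF_OPS is just an
 * array of struct sock_filter.  A minimal one-instruction program that
 * returns -1 (so tcf_bpf_act() above falls back to the default tc
 * action) would be, with bpf_num_ops == 1:
 *
 *	struct sock_filter ret_default[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 */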
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with tcf lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, parm->index, est, act,
				     &act_bpf_ops, bind, true);
		if (ret < 0) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (res == ACT_P_CREATED) {
		tcf_idr_insert(tn, *act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	tcf_idr_release(*act, bind);
	return ret;
}
static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.type		= TCA_ACT_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf_act,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, bpf_net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init		= bpf_init_net,
	.exit_batch	= bpf_exit_net,
	.id		= &bpf_net_id,
	.size		= sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");
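/* Example usage from iproute2 (illustrative; see tc-bpf(8) for the
 * exact syntax supported by your tc version):
 *
 *	# classic BPF, delivered as raw opcodes via TCA_ACT_BPF_OPS;
 *	# "1,6 0 0 4294967295," is one BPF_RET|BPF_K instruction:
 *	tc actions add action bpf bytecode '1,6 0 0 4294967295,'
 *
 *	# eBPF, delivered as a program fd via TCA_ACT_BPF_FD:
 *	tc actions add action bpf obj prog.o sec action
 */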