/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define BPF_TAB_MASK		15
#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u32 bpf_fd;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
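
/* Per-packet execution path: update last-use and byte/packet stats,
 * dereference the attached program under RCU, run it at ingress or
 * egress, and map the program's return code to a TC action verdict.
 */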
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}
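
/* Dump helpers: a classic BPF action is reported as its raw sock_filter
 * ops array, while an eBPF action is reported by the program fd it was
 * installed with plus an optional user-supplied name.
 */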
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = prog->tcf_refcnt - ref,
		.bindcnt = prog->tcf_bindcnt - bind,
		.action  = prog->tcf_action,
	};
	struct tcf_t tm;
	int ret;

	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
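
/* Classic BPF setup path: copy the sock_filter array supplied over
 * netlink and translate it into an internal bpf_prog with
 * bpf_prog_create().
 */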
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
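
/* Extended BPF setup path: take a reference on an already loaded
 * BPF_PROG_TYPE_SCHED_ACT program identified by its file descriptor,
 * optionally remembering a user-supplied name for later dumps.
 */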
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
			       nla_len(tb[TCA_ACT_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_fd = bpf_fd;
	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	if (cfg->is_ebpf)
		bpf_prog_put(cfg->filter);
	else
		bpf_prog_destroy(cfg->filter);

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with rtnl lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	if (!tcf_hash_check(tn, parm->index, act, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, act,
				      &act_bpf_ops, bind, true);
		if (ret < 0)
			return ret;

		res = ACT_P_CREATED;
	} else {
		/* Don't override defaults. */
		if (bind)
			return 0;

		tcf_hash_release(*act, bind);
		if (!replace)
			return -EEXIST;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(*act);
	ASSERT_RTNL();

	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;
	if (cfg.bpf_fd)
		prog->bpf_fd = cfg.bpf_fd;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);

	if (res == ACT_P_CREATED) {
		tcf_hash_insert(tn, *act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	if (res == ACT_P_CREATED)
		tcf_hash_cleanup(*act, est);

	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_hash_search(tn, a, index);
}
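
/* Callbacks wiring the "bpf" action into the TC action API. */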
static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.type		= TCA_ACT_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(tn, &act_bpf_ops, BPF_TAB_MASK);
}

static void __net_exit bpf_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit = bpf_exit_net,
	.id   = &bpf_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");