// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/sock.h>	/* skb_sk_is_prefetched() */
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
#define ACT_BPF_NAME_LEN	256
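/* Per-instance program configuration, covering both flavours: classic
 * BPF translated via bpf_prog_create() and eBPF obtained by fd. It is
 * also used to stage an old program for deferred cleanup on replace.
 */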
struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};
static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
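/* Per-packet fast path: executes the attached program under RCU. At
 * ingress the MAC header is pushed back before the run and pulled off
 * again afterwards, so the program sees the same frame layout as at
 * egress. The program's return code becomes the action's verdict.
 */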
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
		       struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
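/*
 * Illustration only, not part of this module: a minimal eBPF program of
 * type BPF_PROG_TYPE_SCHED_ACT as it might be written against libbpf.
 * File and section names below are examples, not kernel API; the return
 * value is what the switch statement above maps onto a verdict.
 *
 *	// act_example.bpf.c (sketch)
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("action")
 *	int act_main(struct __sk_buff *skb)
 *	{
 *		if (skb->len > 1500)
 *			return TC_ACT_SHOT;	// drop, qstats bumped
 *		return TC_ACT_UNSPEC;		// fall back to tc default
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */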
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}
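/* Userspace note (illustrative): this dump is what e.g.
 * "tc -s actions ls action bpf" renders, i.e. PARMS plus either the
 * classic bytecode (OPS/OPS_LEN) or the eBPF name, id and tag.
 */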
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
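/*
 * Userspace sketch (iproute2 syntax from memory, verify against your
 * version): both program flavours arrive through the attributes above:
 *
 *	# classic bytecode -> TCA_ACT_BPF_OPS{,_LEN}
 *	tc actions add action bpf bytecode '1,6 0 0 4294967295'
 *
 *	# eBPF object -> TCA_ACT_BPF_FD (+ TCA_ACT_BPF_NAME)
 *	tc actions add action bpf obj act_example.bpf.o sec action
 *
 * where '6 0 0 4294967295' is a single BPF_RET returning -1, i.e.
 * TC_ACT_UNSPEC: use the configured default action.
 */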
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EPERM;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EPERM;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
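/*
 * Sketch (userspace side, for illustration): the smallest classic
 * program this function accepts is a single BPF_RET statement, e.g.
 * built with the uapi macros from <linux/filter.h>:
 *
 *	struct sock_filter ops[] = {
 *		BPF_STMT(BPF_RET | BPF_K, TC_ACT_OK),
 *	};
 *
 * Then bpf_num_ops == 1 and the array is the TCA_ACT_BPF_OPS payload.
 */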
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}
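/* The fd above must refer to a program already loaded as
 * BPF_PROG_TYPE_SCHED_ACT (e.g. via the bpf(BPF_PROG_LOAD) syscall or
 * libbpf); bpf_prog_get_type() fails for any other program type, so a
 * mismatched program is rejected here rather than at run time.
 */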
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}
static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with tcf lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);
	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}
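/*
 * Create a new action instance or replace an existing one. On replace,
 * the old program is snapshotted into "old" under tcf_lock, the new
 * program is published via rcu_assign_pointer(), and the old one is
 * freed only after synchronize_rcu(), so no CPU can still be running it.
 */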
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind, bool rtnl_held,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, 0);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res == ACT_P_CREATED) {
		tcf_idr_insert(tn, *act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}
static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}
static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_idr_search(tn, a, index);
}
static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.id		= TCA_ID_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf_act,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};
static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, bpf_net_id);
}
static struct pernet_operations bpf_net_ops = {
	.init		= bpf_init_net,
	.exit_batch	= bpf_exit_net,
	.id		= &bpf_net_id,
	.size		= sizeof(struct tc_action_net),
};
static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}
static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}
module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");