seccomp: Fix ioctl number for SECCOMP_IOCTL_NOTIF_ID_VALID
[linux/fpc-iii.git] / net / sched / act_police.c
blob89c04c52af3dab3d7fc1fbd1d08a3f112c33ac72
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <net/act_api.h>
19 #include <net/netlink.h>
20 #include <net/pkt_cls.h>
21 #include <net/tc_act/tc_police.h>
23 /* Each policer is serialized by its individual spinlock */
25 static unsigned int police_net_id;
26 static struct tc_action_ops act_police_ops;
28 static int tcf_police_walker(struct net *net, struct sk_buff *skb,
29 struct netlink_callback *cb, int type,
30 const struct tc_action_ops *ops,
31 struct netlink_ext_ack *extack)
33 struct tc_action_net *tn = net_generic(net, police_net_id);
35 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
38 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
39 [TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE },
40 [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE },
41 [TCA_POLICE_AVRATE] = { .type = NLA_U32 },
42 [TCA_POLICE_RESULT] = { .type = NLA_U32 },
43 [TCA_POLICE_RATE64] = { .type = NLA_U64 },
44 [TCA_POLICE_PEAKRATE64] = { .type = NLA_U64 },
47 static int tcf_police_init(struct net *net, struct nlattr *nla,
48 struct nlattr *est, struct tc_action **a,
49 int ovr, int bind, bool rtnl_held,
50 struct tcf_proto *tp,
51 struct netlink_ext_ack *extack)
53 int ret = 0, tcfp_result = TC_ACT_OK, err, size;
54 struct nlattr *tb[TCA_POLICE_MAX + 1];
55 struct tcf_chain *goto_ch = NULL;
56 struct tc_police *parm;
57 struct tcf_police *police;
58 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
59 struct tc_action_net *tn = net_generic(net, police_net_id);
60 struct tcf_police_params *new;
61 bool exists = false;
62 u32 index;
63 u64 rate64, prate64;
65 if (nla == NULL)
66 return -EINVAL;
68 err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
69 police_policy, NULL);
70 if (err < 0)
71 return err;
73 if (tb[TCA_POLICE_TBF] == NULL)
74 return -EINVAL;
75 size = nla_len(tb[TCA_POLICE_TBF]);
76 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
77 return -EINVAL;
79 parm = nla_data(tb[TCA_POLICE_TBF]);
80 index = parm->index;
81 err = tcf_idr_check_alloc(tn, &index, a, bind);
82 if (err < 0)
83 return err;
84 exists = err;
85 if (exists && bind)
86 return 0;
88 if (!exists) {
89 ret = tcf_idr_create(tn, index, NULL, a,
90 &act_police_ops, bind, true);
91 if (ret) {
92 tcf_idr_cleanup(tn, index);
93 return ret;
95 ret = ACT_P_CREATED;
96 spin_lock_init(&(to_police(*a)->tcfp_lock));
97 } else if (!ovr) {
98 tcf_idr_release(*a, bind);
99 return -EEXIST;
101 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
102 if (err < 0)
103 goto release_idr;
105 police = to_police(*a);
106 if (parm->rate.rate) {
107 err = -ENOMEM;
108 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
109 if (R_tab == NULL)
110 goto failure;
112 if (parm->peakrate.rate) {
113 P_tab = qdisc_get_rtab(&parm->peakrate,
114 tb[TCA_POLICE_PEAKRATE], NULL);
115 if (P_tab == NULL)
116 goto failure;
120 if (est) {
121 err = gen_replace_estimator(&police->tcf_bstats,
122 police->common.cpu_bstats,
123 &police->tcf_rate_est,
124 &police->tcf_lock,
125 NULL, est);
126 if (err)
127 goto failure;
128 } else if (tb[TCA_POLICE_AVRATE] &&
129 (ret == ACT_P_CREATED ||
130 !gen_estimator_active(&police->tcf_rate_est))) {
131 err = -EINVAL;
132 goto failure;
135 if (tb[TCA_POLICE_RESULT]) {
136 tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
137 if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
138 NL_SET_ERR_MSG(extack,
139 "goto chain not allowed on fallback");
140 err = -EINVAL;
141 goto failure;
145 new = kzalloc(sizeof(*new), GFP_KERNEL);
146 if (unlikely(!new)) {
147 err = -ENOMEM;
148 goto failure;
151 /* No failure allowed after this point */
152 new->tcfp_result = tcfp_result;
153 new->tcfp_mtu = parm->mtu;
154 if (!new->tcfp_mtu) {
155 new->tcfp_mtu = ~0;
156 if (R_tab)
157 new->tcfp_mtu = 255 << R_tab->rate.cell_log;
159 if (R_tab) {
160 new->rate_present = true;
161 rate64 = tb[TCA_POLICE_RATE64] ?
162 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
163 psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
164 qdisc_put_rtab(R_tab);
165 } else {
166 new->rate_present = false;
168 if (P_tab) {
169 new->peak_present = true;
170 prate64 = tb[TCA_POLICE_PEAKRATE64] ?
171 nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
172 psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
173 qdisc_put_rtab(P_tab);
174 } else {
175 new->peak_present = false;
178 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
179 if (new->peak_present)
180 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
181 new->tcfp_mtu);
183 if (tb[TCA_POLICE_AVRATE])
184 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
186 spin_lock_bh(&police->tcf_lock);
187 spin_lock_bh(&police->tcfp_lock);
188 police->tcfp_t_c = ktime_get_ns();
189 police->tcfp_toks = new->tcfp_burst;
190 if (new->peak_present)
191 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
192 spin_unlock_bh(&police->tcfp_lock);
193 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
194 rcu_swap_protected(police->params,
195 new,
196 lockdep_is_held(&police->tcf_lock));
197 spin_unlock_bh(&police->tcf_lock);
199 if (goto_ch)
200 tcf_chain_put_by_act(goto_ch);
201 if (new)
202 kfree_rcu(new, rcu);
204 if (ret == ACT_P_CREATED)
205 tcf_idr_insert(tn, *a);
206 return ret;
208 failure:
209 qdisc_put_rtab(P_tab);
210 qdisc_put_rtab(R_tab);
211 if (goto_ch)
212 tcf_chain_put_by_act(goto_ch);
213 release_idr:
214 tcf_idr_release(*a, bind);
215 return err;
218 static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
219 struct tcf_result *res)
221 struct tcf_police *police = to_police(a);
222 struct tcf_police_params *p;
223 s64 now, toks, ptoks = 0;
224 int ret;
226 tcf_lastuse_update(&police->tcf_tm);
227 bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
229 ret = READ_ONCE(police->tcf_action);
230 p = rcu_dereference_bh(police->params);
232 if (p->tcfp_ewma_rate) {
233 struct gnet_stats_rate_est64 sample;
235 if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
236 sample.bps >= p->tcfp_ewma_rate)
237 goto inc_overlimits;
240 if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
241 if (!p->rate_present) {
242 ret = p->tcfp_result;
243 goto end;
246 now = ktime_get_ns();
247 spin_lock_bh(&police->tcfp_lock);
248 toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
249 if (p->peak_present) {
250 ptoks = toks + police->tcfp_ptoks;
251 if (ptoks > p->tcfp_mtu_ptoks)
252 ptoks = p->tcfp_mtu_ptoks;
253 ptoks -= (s64)psched_l2t_ns(&p->peak,
254 qdisc_pkt_len(skb));
256 toks += police->tcfp_toks;
257 if (toks > p->tcfp_burst)
258 toks = p->tcfp_burst;
259 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
260 if ((toks|ptoks) >= 0) {
261 police->tcfp_t_c = now;
262 police->tcfp_toks = toks;
263 police->tcfp_ptoks = ptoks;
264 spin_unlock_bh(&police->tcfp_lock);
265 ret = p->tcfp_result;
266 goto inc_drops;
268 spin_unlock_bh(&police->tcfp_lock);
271 inc_overlimits:
272 qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
273 inc_drops:
274 if (ret == TC_ACT_SHOT)
275 qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
276 end:
277 return ret;
280 static void tcf_police_cleanup(struct tc_action *a)
282 struct tcf_police *police = to_police(a);
283 struct tcf_police_params *p;
285 p = rcu_dereference_protected(police->params, 1);
286 if (p)
287 kfree_rcu(p, rcu);
290 static void tcf_police_stats_update(struct tc_action *a,
291 u64 bytes, u32 packets,
292 u64 lastuse, bool hw)
294 struct tcf_police *police = to_police(a);
295 struct tcf_t *tm = &police->tcf_tm;
297 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
298 if (hw)
299 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
300 bytes, packets);
301 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
304 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
305 int bind, int ref)
307 unsigned char *b = skb_tail_pointer(skb);
308 struct tcf_police *police = to_police(a);
309 struct tcf_police_params *p;
310 struct tc_police opt = {
311 .index = police->tcf_index,
312 .refcnt = refcount_read(&police->tcf_refcnt) - ref,
313 .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
315 struct tcf_t t;
317 spin_lock_bh(&police->tcf_lock);
318 opt.action = police->tcf_action;
319 p = rcu_dereference_protected(police->params,
320 lockdep_is_held(&police->tcf_lock));
321 opt.mtu = p->tcfp_mtu;
322 opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
323 if (p->rate_present) {
324 psched_ratecfg_getrate(&opt.rate, &p->rate);
325 if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
326 nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
327 police->params->rate.rate_bytes_ps,
328 TCA_POLICE_PAD))
329 goto nla_put_failure;
331 if (p->peak_present) {
332 psched_ratecfg_getrate(&opt.peakrate, &p->peak);
333 if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
334 nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
335 police->params->peak.rate_bytes_ps,
336 TCA_POLICE_PAD))
337 goto nla_put_failure;
339 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
340 goto nla_put_failure;
341 if (p->tcfp_result &&
342 nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
343 goto nla_put_failure;
344 if (p->tcfp_ewma_rate &&
345 nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
346 goto nla_put_failure;
348 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
349 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
350 t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
351 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
352 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
353 goto nla_put_failure;
354 spin_unlock_bh(&police->tcf_lock);
356 return skb->len;
358 nla_put_failure:
359 spin_unlock_bh(&police->tcf_lock);
360 nlmsg_trim(skb, b);
361 return -1;
364 static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
366 struct tc_action_net *tn = net_generic(net, police_net_id);
368 return tcf_idr_search(tn, a, index);
371 MODULE_AUTHOR("Alexey Kuznetsov");
372 MODULE_DESCRIPTION("Policing actions");
373 MODULE_LICENSE("GPL");
375 static struct tc_action_ops act_police_ops = {
376 .kind = "police",
377 .id = TCA_ID_POLICE,
378 .owner = THIS_MODULE,
379 .stats_update = tcf_police_stats_update,
380 .act = tcf_police_act,
381 .dump = tcf_police_dump,
382 .init = tcf_police_init,
383 .walk = tcf_police_walker,
384 .lookup = tcf_police_search,
385 .cleanup = tcf_police_cleanup,
386 .size = sizeof(struct tcf_police),
389 static __net_init int police_init_net(struct net *net)
391 struct tc_action_net *tn = net_generic(net, police_net_id);
393 return tc_action_net_init(net, tn, &act_police_ops);
396 static void __net_exit police_exit_net(struct list_head *net_list)
398 tc_action_net_exit(net_list, police_net_id);
401 static struct pernet_operations police_net_ops = {
402 .init = police_init_net,
403 .exit_batch = police_exit_net,
404 .id = &police_net_id,
405 .size = sizeof(struct tc_action_net),
408 static int __init police_init_module(void)
410 return tcf_register_action(&act_police_ops, &police_net_ops);
413 static void __exit police_cleanup_module(void)
415 tcf_unregister_action(&act_police_ops, &police_net_ops);
418 module_init(police_init_module);
419 module_exit(police_cleanup_module);