Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
[linux/fpc-iii.git] / net / sched / act_police.c
blobfed47b658837aac39b6d29c4b893490a4ca42e90
/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * 		J Hadi Salim (action changes)
 */
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
21 #include <linux/mm.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/in.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/netdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/module.h>
30 #include <linux/rtnetlink.h>
31 #include <linux/init.h>
32 #include <net/sock.h>
33 #include <net/act_api.h>
35 #define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
36 #define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
38 #define POL_TAB_MASK 15
39 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
40 static u32 police_idx_gen;
41 static DEFINE_RWLOCK(police_lock);
43 static struct tcf_hashinfo police_hash_info = {
44 .htab = tcf_police_ht,
45 .hmask = POL_TAB_MASK,
46 .lock = &police_lock,
49 /* Each policer is serialized by its individual spinlock */
51 #ifdef CONFIG_NET_CLS_ACT
52 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
53 int type, struct tc_action *a)
55 struct tcf_common *p;
56 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
57 struct rtattr *r;
59 read_lock(&police_lock);
61 s_i = cb->args[0];
63 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
64 p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
66 for (; p; p = p->tcfc_next) {
67 index++;
68 if (index < s_i)
69 continue;
70 a->priv = p;
71 a->order = index;
72 r = (struct rtattr*) skb->tail;
73 RTA_PUT(skb, a->order, 0, NULL);
74 if (type == RTM_DELACTION)
75 err = tcf_action_dump_1(skb, a, 0, 1);
76 else
77 err = tcf_action_dump_1(skb, a, 0, 0);
78 if (err < 0) {
79 index--;
80 skb_trim(skb, (u8*)r - skb->data);
81 goto done;
83 r->rta_len = skb->tail - (u8*)r;
84 n_i++;
87 done:
88 read_unlock(&police_lock);
89 if (n_i)
90 cb->args[0] += n_i;
91 return n_i;
93 rtattr_failure:
94 skb_trim(skb, (u8*)r - skb->data);
95 goto done;
97 #endif
99 void tcf_police_destroy(struct tcf_police *p)
101 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
102 struct tcf_common **p1p;
104 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
105 if (*p1p == &p->common) {
106 write_lock_bh(&police_lock);
107 *p1p = p->tcf_next;
108 write_unlock_bh(&police_lock);
109 #ifdef CONFIG_NET_ESTIMATOR
110 gen_kill_estimator(&p->tcf_bstats,
111 &p->tcf_rate_est);
112 #endif
113 if (p->tcfp_R_tab)
114 qdisc_put_rtab(p->tcfp_R_tab);
115 if (p->tcfp_P_tab)
116 qdisc_put_rtab(p->tcfp_P_tab);
117 kfree(p);
118 return;
121 BUG_TRAP(0);
124 #ifdef CONFIG_NET_CLS_ACT
125 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
126 struct tc_action *a, int ovr, int bind)
128 unsigned h;
129 int ret = 0, err;
130 struct rtattr *tb[TCA_POLICE_MAX];
131 struct tc_police *parm;
132 struct tcf_police *police;
133 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
135 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
136 return -EINVAL;
138 if (tb[TCA_POLICE_TBF-1] == NULL ||
139 RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
140 return -EINVAL;
141 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
143 if (tb[TCA_POLICE_RESULT-1] != NULL &&
144 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
145 return -EINVAL;
146 if (tb[TCA_POLICE_RESULT-1] != NULL &&
147 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
148 return -EINVAL;
150 if (parm->index) {
151 struct tcf_common *pc;
153 pc = tcf_hash_lookup(parm->index, &police_hash_info);
154 if (pc != NULL) {
155 a->priv = pc;
156 police = to_police(pc);
157 if (bind) {
158 police->tcf_bindcnt += 1;
159 police->tcf_refcnt += 1;
161 if (ovr)
162 goto override;
163 return ret;
167 police = kzalloc(sizeof(*police), GFP_KERNEL);
168 if (police == NULL)
169 return -ENOMEM;
170 ret = ACT_P_CREATED;
171 police->tcf_refcnt = 1;
172 spin_lock_init(&police->tcf_lock);
173 police->tcf_stats_lock = &police->tcf_lock;
174 if (bind)
175 police->tcf_bindcnt = 1;
176 override:
177 if (parm->rate.rate) {
178 err = -ENOMEM;
179 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
180 if (R_tab == NULL)
181 goto failure;
182 if (parm->peakrate.rate) {
183 P_tab = qdisc_get_rtab(&parm->peakrate,
184 tb[TCA_POLICE_PEAKRATE-1]);
185 if (P_tab == NULL) {
186 qdisc_put_rtab(R_tab);
187 goto failure;
191 /* No failure allowed after this point */
192 spin_lock_bh(&police->tcf_lock);
193 if (R_tab != NULL) {
194 qdisc_put_rtab(police->tcfp_R_tab);
195 police->tcfp_R_tab = R_tab;
197 if (P_tab != NULL) {
198 qdisc_put_rtab(police->tcfp_P_tab);
199 police->tcfp_P_tab = P_tab;
202 if (tb[TCA_POLICE_RESULT-1])
203 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
204 police->tcfp_toks = police->tcfp_burst = parm->burst;
205 police->tcfp_mtu = parm->mtu;
206 if (police->tcfp_mtu == 0) {
207 police->tcfp_mtu = ~0;
208 if (police->tcfp_R_tab)
209 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
211 if (police->tcfp_P_tab)
212 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
213 police->tcf_action = parm->action;
215 #ifdef CONFIG_NET_ESTIMATOR
216 if (tb[TCA_POLICE_AVRATE-1])
217 police->tcfp_ewma_rate =
218 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
219 if (est)
220 gen_replace_estimator(&police->tcf_bstats,
221 &police->tcf_rate_est,
222 police->tcf_stats_lock, est);
223 #endif
225 spin_unlock_bh(&police->tcf_lock);
226 if (ret != ACT_P_CREATED)
227 return ret;
229 PSCHED_GET_TIME(police->tcfp_t_c);
230 police->tcf_index = parm->index ? parm->index :
231 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
232 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
233 write_lock_bh(&police_lock);
234 police->tcf_next = tcf_police_ht[h];
235 tcf_police_ht[h] = &police->common;
236 write_unlock_bh(&police_lock);
238 a->priv = police;
239 return ret;
241 failure:
242 if (ret == ACT_P_CREATED)
243 kfree(police);
244 return err;
247 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
249 struct tcf_police *p = a->priv;
251 if (p != NULL)
252 return tcf_police_release(p, bind);
253 return 0;
256 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
257 struct tcf_result *res)
259 struct tcf_police *police = a->priv;
260 psched_time_t now;
261 long toks;
262 long ptoks = 0;
264 spin_lock(&police->tcf_lock);
266 police->tcf_bstats.bytes += skb->len;
267 police->tcf_bstats.packets++;
269 #ifdef CONFIG_NET_ESTIMATOR
270 if (police->tcfp_ewma_rate &&
271 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
272 police->tcf_qstats.overlimits++;
273 spin_unlock(&police->tcf_lock);
274 return police->tcf_action;
276 #endif
278 if (skb->len <= police->tcfp_mtu) {
279 if (police->tcfp_R_tab == NULL) {
280 spin_unlock(&police->tcf_lock);
281 return police->tcfp_result;
284 PSCHED_GET_TIME(now);
286 toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
287 police->tcfp_burst);
288 if (police->tcfp_P_tab) {
289 ptoks = toks + police->tcfp_ptoks;
290 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
291 ptoks = (long)L2T_P(police, police->tcfp_mtu);
292 ptoks -= L2T_P(police, skb->len);
294 toks += police->tcfp_toks;
295 if (toks > (long)police->tcfp_burst)
296 toks = police->tcfp_burst;
297 toks -= L2T(police, skb->len);
298 if ((toks|ptoks) >= 0) {
299 police->tcfp_t_c = now;
300 police->tcfp_toks = toks;
301 police->tcfp_ptoks = ptoks;
302 spin_unlock(&police->tcf_lock);
303 return police->tcfp_result;
307 police->tcf_qstats.overlimits++;
308 spin_unlock(&police->tcf_lock);
309 return police->tcf_action;
312 static int
313 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
315 unsigned char *b = skb->tail;
316 struct tcf_police *police = a->priv;
317 struct tc_police opt;
319 opt.index = police->tcf_index;
320 opt.action = police->tcf_action;
321 opt.mtu = police->tcfp_mtu;
322 opt.burst = police->tcfp_burst;
323 opt.refcnt = police->tcf_refcnt - ref;
324 opt.bindcnt = police->tcf_bindcnt - bind;
325 if (police->tcfp_R_tab)
326 opt.rate = police->tcfp_R_tab->rate;
327 else
328 memset(&opt.rate, 0, sizeof(opt.rate));
329 if (police->tcfp_P_tab)
330 opt.peakrate = police->tcfp_P_tab->rate;
331 else
332 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
333 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
334 if (police->tcfp_result)
335 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
336 &police->tcfp_result);
337 #ifdef CONFIG_NET_ESTIMATOR
338 if (police->tcfp_ewma_rate)
339 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
340 #endif
341 return skb->len;
343 rtattr_failure:
344 skb_trim(skb, b - skb->data);
345 return -1;
348 MODULE_AUTHOR("Alexey Kuznetsov");
349 MODULE_DESCRIPTION("Policing actions");
350 MODULE_LICENSE("GPL");
352 static struct tc_action_ops act_police_ops = {
353 .kind = "police",
354 .hinfo = &police_hash_info,
355 .type = TCA_ID_POLICE,
356 .capab = TCA_CAP_NONE,
357 .owner = THIS_MODULE,
358 .act = tcf_act_police,
359 .dump = tcf_act_police_dump,
360 .cleanup = tcf_act_police_cleanup,
361 .lookup = tcf_hash_search,
362 .init = tcf_act_police_locate,
363 .walk = tcf_act_police_walker
366 static int __init
367 police_init_module(void)
369 return tcf_register_action(&act_police_ops);
372 static void __exit
373 police_cleanup_module(void)
375 tcf_unregister_action(&act_police_ops);
module_init(police_init_module);
module_exit(police_cleanup_module);
381 #else /* CONFIG_NET_CLS_ACT */
383 static struct tcf_common *tcf_police_lookup(u32 index)
385 struct tcf_hashinfo *hinfo = &police_hash_info;
386 struct tcf_common *p;
388 read_lock(hinfo->lock);
389 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
390 p = p->tcfc_next) {
391 if (p->tcfc_index == index)
392 break;
394 read_unlock(hinfo->lock);
396 return p;
399 static u32 tcf_police_new_index(void)
401 u32 *idx_gen = &police_idx_gen;
402 u32 val = *idx_gen;
404 do {
405 if (++val == 0)
406 val = 1;
407 } while (tcf_police_lookup(val));
409 return (*idx_gen = val);
412 struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
414 unsigned int h;
415 struct tcf_police *police;
416 struct rtattr *tb[TCA_POLICE_MAX];
417 struct tc_police *parm;
419 if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
420 return NULL;
422 if (tb[TCA_POLICE_TBF-1] == NULL ||
423 RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
424 return NULL;
426 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
428 if (parm->index) {
429 struct tcf_common *pc;
431 pc = tcf_police_lookup(parm->index);
432 if (pc) {
433 police = to_police(pc);
434 police->tcf_refcnt++;
435 return police;
438 police = kzalloc(sizeof(*police), GFP_KERNEL);
439 if (unlikely(!police))
440 return NULL;
442 police->tcf_refcnt = 1;
443 spin_lock_init(&police->tcf_lock);
444 police->tcf_stats_lock = &police->tcf_lock;
445 if (parm->rate.rate) {
446 police->tcfp_R_tab =
447 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
448 if (police->tcfp_R_tab == NULL)
449 goto failure;
450 if (parm->peakrate.rate) {
451 police->tcfp_P_tab =
452 qdisc_get_rtab(&parm->peakrate,
453 tb[TCA_POLICE_PEAKRATE-1]);
454 if (police->tcfp_P_tab == NULL)
455 goto failure;
458 if (tb[TCA_POLICE_RESULT-1]) {
459 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
460 goto failure;
461 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
463 #ifdef CONFIG_NET_ESTIMATOR
464 if (tb[TCA_POLICE_AVRATE-1]) {
465 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
466 goto failure;
467 police->tcfp_ewma_rate =
468 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
470 #endif
471 police->tcfp_toks = police->tcfp_burst = parm->burst;
472 police->tcfp_mtu = parm->mtu;
473 if (police->tcfp_mtu == 0) {
474 police->tcfp_mtu = ~0;
475 if (police->tcfp_R_tab)
476 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
478 if (police->tcfp_P_tab)
479 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
480 PSCHED_GET_TIME(police->tcfp_t_c);
481 police->tcf_index = parm->index ? parm->index :
482 tcf_police_new_index();
483 police->tcf_action = parm->action;
484 #ifdef CONFIG_NET_ESTIMATOR
485 if (est)
486 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
487 police->tcf_stats_lock, est);
488 #endif
489 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
490 write_lock_bh(&police_lock);
491 police->tcf_next = tcf_police_ht[h];
492 tcf_police_ht[h] = &police->common;
493 write_unlock_bh(&police_lock);
494 return police;
496 failure:
497 if (police->tcfp_R_tab)
498 qdisc_put_rtab(police->tcfp_R_tab);
499 kfree(police);
500 return NULL;
503 int tcf_police(struct sk_buff *skb, struct tcf_police *police)
505 psched_time_t now;
506 long toks;
507 long ptoks = 0;
509 spin_lock(&police->tcf_lock);
511 police->tcf_bstats.bytes += skb->len;
512 police->tcf_bstats.packets++;
514 #ifdef CONFIG_NET_ESTIMATOR
515 if (police->tcfp_ewma_rate &&
516 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
517 police->tcf_qstats.overlimits++;
518 spin_unlock(&police->tcf_lock);
519 return police->tcf_action;
521 #endif
522 if (skb->len <= police->tcfp_mtu) {
523 if (police->tcfp_R_tab == NULL) {
524 spin_unlock(&police->tcf_lock);
525 return police->tcfp_result;
528 PSCHED_GET_TIME(now);
529 toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
530 police->tcfp_burst);
531 if (police->tcfp_P_tab) {
532 ptoks = toks + police->tcfp_ptoks;
533 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
534 ptoks = (long)L2T_P(police, police->tcfp_mtu);
535 ptoks -= L2T_P(police, skb->len);
537 toks += police->tcfp_toks;
538 if (toks > (long)police->tcfp_burst)
539 toks = police->tcfp_burst;
540 toks -= L2T(police, skb->len);
541 if ((toks|ptoks) >= 0) {
542 police->tcfp_t_c = now;
543 police->tcfp_toks = toks;
544 police->tcfp_ptoks = ptoks;
545 spin_unlock(&police->tcf_lock);
546 return police->tcfp_result;
550 police->tcf_qstats.overlimits++;
551 spin_unlock(&police->tcf_lock);
552 return police->tcf_action;
554 EXPORT_SYMBOL(tcf_police);
556 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
558 unsigned char *b = skb->tail;
559 struct tc_police opt;
561 opt.index = police->tcf_index;
562 opt.action = police->tcf_action;
563 opt.mtu = police->tcfp_mtu;
564 opt.burst = police->tcfp_burst;
565 if (police->tcfp_R_tab)
566 opt.rate = police->tcfp_R_tab->rate;
567 else
568 memset(&opt.rate, 0, sizeof(opt.rate));
569 if (police->tcfp_P_tab)
570 opt.peakrate = police->tcfp_P_tab->rate;
571 else
572 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
573 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
574 if (police->tcfp_result)
575 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
576 &police->tcfp_result);
577 #ifdef CONFIG_NET_ESTIMATOR
578 if (police->tcfp_ewma_rate)
579 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
580 #endif
581 return skb->len;
583 rtattr_failure:
584 skb_trim(skb, b - skb->data);
585 return -1;
588 int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
590 struct gnet_dump d;
592 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
593 TCA_XSTATS, police->tcf_stats_lock,
594 &d) < 0)
595 goto errout;
597 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
598 #ifdef CONFIG_NET_ESTIMATOR
599 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
600 #endif
601 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
602 goto errout;
604 if (gnet_stats_finish_copy(&d) < 0)
605 goto errout;
607 return 0;
609 errout:
610 return -1;
613 #endif /* CONFIG_NET_CLS_ACT */