/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/module.h>	/* duplicate kept from original; harmless (include guards) */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>		/* kzalloc()/kfree() */
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <net/act_api.h>
/*
 * Convert a packet length L (bytes) into token-bucket time units by
 * indexing the precomputed rate table.  L2T() uses the committed-rate
 * table, L2T_P() the peak-rate table.  cell_log is the log2 of the
 * table's byte granularity.
 */
#define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
38 #define POL_TAB_MASK 15
39 static struct tcf_common
*tcf_police_ht
[POL_TAB_MASK
+ 1];
40 static u32 police_idx_gen
;
41 static DEFINE_RWLOCK(police_lock
);
43 static struct tcf_hashinfo police_hash_info
= {
44 .htab
= tcf_police_ht
,
45 .hmask
= POL_TAB_MASK
,
/* Each policer is serialized by its individual spinlock */
51 #ifdef CONFIG_NET_CLS_ACT
52 static int tcf_act_police_walker(struct sk_buff
*skb
, struct netlink_callback
*cb
,
53 int type
, struct tc_action
*a
)
56 int err
= 0, index
= -1, i
= 0, s_i
= 0, n_i
= 0;
59 read_lock(&police_lock
);
63 for (i
= 0; i
< (POL_TAB_MASK
+ 1); i
++) {
64 p
= tcf_police_ht
[tcf_hash(i
, POL_TAB_MASK
)];
66 for (; p
; p
= p
->tcfc_next
) {
72 r
= (struct rtattr
*) skb
->tail
;
73 RTA_PUT(skb
, a
->order
, 0, NULL
);
74 if (type
== RTM_DELACTION
)
75 err
= tcf_action_dump_1(skb
, a
, 0, 1);
77 err
= tcf_action_dump_1(skb
, a
, 0, 0);
80 skb_trim(skb
, (u8
*)r
- skb
->data
);
83 r
->rta_len
= skb
->tail
- (u8
*)r
;
88 read_unlock(&police_lock
);
94 skb_trim(skb
, (u8
*)r
- skb
->data
);
99 void tcf_police_destroy(struct tcf_police
*p
)
101 unsigned int h
= tcf_hash(p
->tcf_index
, POL_TAB_MASK
);
102 struct tcf_common
**p1p
;
104 for (p1p
= &tcf_police_ht
[h
]; *p1p
; p1p
= &(*p1p
)->tcfc_next
) {
105 if (*p1p
== &p
->common
) {
106 write_lock_bh(&police_lock
);
108 write_unlock_bh(&police_lock
);
109 #ifdef CONFIG_NET_ESTIMATOR
110 gen_kill_estimator(&p
->tcf_bstats
,
114 qdisc_put_rtab(p
->tcfp_R_tab
);
116 qdisc_put_rtab(p
->tcfp_P_tab
);
124 #ifdef CONFIG_NET_CLS_ACT
125 static int tcf_act_police_locate(struct rtattr
*rta
, struct rtattr
*est
,
126 struct tc_action
*a
, int ovr
, int bind
)
130 struct rtattr
*tb
[TCA_POLICE_MAX
];
131 struct tc_police
*parm
;
132 struct tcf_police
*police
;
133 struct qdisc_rate_table
*R_tab
= NULL
, *P_tab
= NULL
;
135 if (rta
== NULL
|| rtattr_parse_nested(tb
, TCA_POLICE_MAX
, rta
) < 0)
138 if (tb
[TCA_POLICE_TBF
-1] == NULL
||
139 RTA_PAYLOAD(tb
[TCA_POLICE_TBF
-1]) != sizeof(*parm
))
141 parm
= RTA_DATA(tb
[TCA_POLICE_TBF
-1]);
143 if (tb
[TCA_POLICE_RESULT
-1] != NULL
&&
144 RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
146 if (tb
[TCA_POLICE_RESULT
-1] != NULL
&&
147 RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
151 struct tcf_common
*pc
;
153 pc
= tcf_hash_lookup(parm
->index
, &police_hash_info
);
156 police
= to_police(pc
);
158 police
->tcf_bindcnt
+= 1;
159 police
->tcf_refcnt
+= 1;
167 police
= kzalloc(sizeof(*police
), GFP_KERNEL
);
171 police
->tcf_refcnt
= 1;
172 spin_lock_init(&police
->tcf_lock
);
173 police
->tcf_stats_lock
= &police
->tcf_lock
;
175 police
->tcf_bindcnt
= 1;
177 if (parm
->rate
.rate
) {
179 R_tab
= qdisc_get_rtab(&parm
->rate
, tb
[TCA_POLICE_RATE
-1]);
182 if (parm
->peakrate
.rate
) {
183 P_tab
= qdisc_get_rtab(&parm
->peakrate
,
184 tb
[TCA_POLICE_PEAKRATE
-1]);
186 qdisc_put_rtab(R_tab
);
191 /* No failure allowed after this point */
192 spin_lock_bh(&police
->tcf_lock
);
194 qdisc_put_rtab(police
->tcfp_R_tab
);
195 police
->tcfp_R_tab
= R_tab
;
198 qdisc_put_rtab(police
->tcfp_P_tab
);
199 police
->tcfp_P_tab
= P_tab
;
202 if (tb
[TCA_POLICE_RESULT
-1])
203 police
->tcfp_result
= *(u32
*)RTA_DATA(tb
[TCA_POLICE_RESULT
-1]);
204 police
->tcfp_toks
= police
->tcfp_burst
= parm
->burst
;
205 police
->tcfp_mtu
= parm
->mtu
;
206 if (police
->tcfp_mtu
== 0) {
207 police
->tcfp_mtu
= ~0;
208 if (police
->tcfp_R_tab
)
209 police
->tcfp_mtu
= 255<<police
->tcfp_R_tab
->rate
.cell_log
;
211 if (police
->tcfp_P_tab
)
212 police
->tcfp_ptoks
= L2T_P(police
, police
->tcfp_mtu
);
213 police
->tcf_action
= parm
->action
;
215 #ifdef CONFIG_NET_ESTIMATOR
216 if (tb
[TCA_POLICE_AVRATE
-1])
217 police
->tcfp_ewma_rate
=
218 *(u32
*)RTA_DATA(tb
[TCA_POLICE_AVRATE
-1]);
220 gen_replace_estimator(&police
->tcf_bstats
,
221 &police
->tcf_rate_est
,
222 police
->tcf_stats_lock
, est
);
225 spin_unlock_bh(&police
->tcf_lock
);
226 if (ret
!= ACT_P_CREATED
)
229 PSCHED_GET_TIME(police
->tcfp_t_c
);
230 police
->tcf_index
= parm
->index
? parm
->index
:
231 tcf_hash_new_index(&police_idx_gen
, &police_hash_info
);
232 h
= tcf_hash(police
->tcf_index
, POL_TAB_MASK
);
233 write_lock_bh(&police_lock
);
234 police
->tcf_next
= tcf_police_ht
[h
];
235 tcf_police_ht
[h
] = &police
->common
;
236 write_unlock_bh(&police_lock
);
242 if (ret
== ACT_P_CREATED
)
247 static int tcf_act_police_cleanup(struct tc_action
*a
, int bind
)
249 struct tcf_police
*p
= a
->priv
;
252 return tcf_police_release(p
, bind
);
256 static int tcf_act_police(struct sk_buff
*skb
, struct tc_action
*a
,
257 struct tcf_result
*res
)
259 struct tcf_police
*police
= a
->priv
;
264 spin_lock(&police
->tcf_lock
);
266 police
->tcf_bstats
.bytes
+= skb
->len
;
267 police
->tcf_bstats
.packets
++;
269 #ifdef CONFIG_NET_ESTIMATOR
270 if (police
->tcfp_ewma_rate
&&
271 police
->tcf_rate_est
.bps
>= police
->tcfp_ewma_rate
) {
272 police
->tcf_qstats
.overlimits
++;
273 spin_unlock(&police
->tcf_lock
);
274 return police
->tcf_action
;
278 if (skb
->len
<= police
->tcfp_mtu
) {
279 if (police
->tcfp_R_tab
== NULL
) {
280 spin_unlock(&police
->tcf_lock
);
281 return police
->tcfp_result
;
284 PSCHED_GET_TIME(now
);
286 toks
= PSCHED_TDIFF_SAFE(now
, police
->tcfp_t_c
,
288 if (police
->tcfp_P_tab
) {
289 ptoks
= toks
+ police
->tcfp_ptoks
;
290 if (ptoks
> (long)L2T_P(police
, police
->tcfp_mtu
))
291 ptoks
= (long)L2T_P(police
, police
->tcfp_mtu
);
292 ptoks
-= L2T_P(police
, skb
->len
);
294 toks
+= police
->tcfp_toks
;
295 if (toks
> (long)police
->tcfp_burst
)
296 toks
= police
->tcfp_burst
;
297 toks
-= L2T(police
, skb
->len
);
298 if ((toks
|ptoks
) >= 0) {
299 police
->tcfp_t_c
= now
;
300 police
->tcfp_toks
= toks
;
301 police
->tcfp_ptoks
= ptoks
;
302 spin_unlock(&police
->tcf_lock
);
303 return police
->tcfp_result
;
307 police
->tcf_qstats
.overlimits
++;
308 spin_unlock(&police
->tcf_lock
);
309 return police
->tcf_action
;
313 tcf_act_police_dump(struct sk_buff
*skb
, struct tc_action
*a
, int bind
, int ref
)
315 unsigned char *b
= skb
->tail
;
316 struct tcf_police
*police
= a
->priv
;
317 struct tc_police opt
;
319 opt
.index
= police
->tcf_index
;
320 opt
.action
= police
->tcf_action
;
321 opt
.mtu
= police
->tcfp_mtu
;
322 opt
.burst
= police
->tcfp_burst
;
323 opt
.refcnt
= police
->tcf_refcnt
- ref
;
324 opt
.bindcnt
= police
->tcf_bindcnt
- bind
;
325 if (police
->tcfp_R_tab
)
326 opt
.rate
= police
->tcfp_R_tab
->rate
;
328 memset(&opt
.rate
, 0, sizeof(opt
.rate
));
329 if (police
->tcfp_P_tab
)
330 opt
.peakrate
= police
->tcfp_P_tab
->rate
;
332 memset(&opt
.peakrate
, 0, sizeof(opt
.peakrate
));
333 RTA_PUT(skb
, TCA_POLICE_TBF
, sizeof(opt
), &opt
);
334 if (police
->tcfp_result
)
335 RTA_PUT(skb
, TCA_POLICE_RESULT
, sizeof(int),
336 &police
->tcfp_result
);
337 #ifdef CONFIG_NET_ESTIMATOR
338 if (police
->tcfp_ewma_rate
)
339 RTA_PUT(skb
, TCA_POLICE_AVRATE
, 4, &police
->tcfp_ewma_rate
);
344 skb_trim(skb
, b
- skb
->data
);
348 MODULE_AUTHOR("Alexey Kuznetsov");
349 MODULE_DESCRIPTION("Policing actions");
350 MODULE_LICENSE("GPL");
352 static struct tc_action_ops act_police_ops
= {
354 .hinfo
= &police_hash_info
,
355 .type
= TCA_ID_POLICE
,
356 .capab
= TCA_CAP_NONE
,
357 .owner
= THIS_MODULE
,
358 .act
= tcf_act_police
,
359 .dump
= tcf_act_police_dump
,
360 .cleanup
= tcf_act_police_cleanup
,
361 .lookup
= tcf_hash_search
,
362 .init
= tcf_act_police_locate
,
363 .walk
= tcf_act_police_walker
367 police_init_module(void)
369 return tcf_register_action(&act_police_ops
);
373 police_cleanup_module(void)
375 tcf_unregister_action(&act_police_ops
);
378 module_init(police_init_module
);
379 module_exit(police_cleanup_module
);
381 #else /* CONFIG_NET_CLS_ACT */
383 static struct tcf_common
*tcf_police_lookup(u32 index
)
385 struct tcf_hashinfo
*hinfo
= &police_hash_info
;
386 struct tcf_common
*p
;
388 read_lock(hinfo
->lock
);
389 for (p
= hinfo
->htab
[tcf_hash(index
, hinfo
->hmask
)]; p
;
391 if (p
->tcfc_index
== index
)
394 read_unlock(hinfo
->lock
);
399 static u32
tcf_police_new_index(void)
401 u32
*idx_gen
= &police_idx_gen
;
407 } while (tcf_police_lookup(val
));
409 return (*idx_gen
= val
);
412 struct tcf_police
*tcf_police_locate(struct rtattr
*rta
, struct rtattr
*est
)
415 struct tcf_police
*police
;
416 struct rtattr
*tb
[TCA_POLICE_MAX
];
417 struct tc_police
*parm
;
419 if (rtattr_parse_nested(tb
, TCA_POLICE_MAX
, rta
) < 0)
422 if (tb
[TCA_POLICE_TBF
-1] == NULL
||
423 RTA_PAYLOAD(tb
[TCA_POLICE_TBF
-1]) != sizeof(*parm
))
426 parm
= RTA_DATA(tb
[TCA_POLICE_TBF
-1]);
429 struct tcf_common
*pc
;
431 pc
= tcf_police_lookup(parm
->index
);
433 police
= to_police(pc
);
434 police
->tcf_refcnt
++;
438 police
= kzalloc(sizeof(*police
), GFP_KERNEL
);
439 if (unlikely(!police
))
442 police
->tcf_refcnt
= 1;
443 spin_lock_init(&police
->tcf_lock
);
444 police
->tcf_stats_lock
= &police
->tcf_lock
;
445 if (parm
->rate
.rate
) {
447 qdisc_get_rtab(&parm
->rate
, tb
[TCA_POLICE_RATE
-1]);
448 if (police
->tcfp_R_tab
== NULL
)
450 if (parm
->peakrate
.rate
) {
452 qdisc_get_rtab(&parm
->peakrate
,
453 tb
[TCA_POLICE_PEAKRATE
-1]);
454 if (police
->tcfp_P_tab
== NULL
)
458 if (tb
[TCA_POLICE_RESULT
-1]) {
459 if (RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
461 police
->tcfp_result
= *(u32
*)RTA_DATA(tb
[TCA_POLICE_RESULT
-1]);
463 #ifdef CONFIG_NET_ESTIMATOR
464 if (tb
[TCA_POLICE_AVRATE
-1]) {
465 if (RTA_PAYLOAD(tb
[TCA_POLICE_AVRATE
-1]) != sizeof(u32
))
467 police
->tcfp_ewma_rate
=
468 *(u32
*)RTA_DATA(tb
[TCA_POLICE_AVRATE
-1]);
471 police
->tcfp_toks
= police
->tcfp_burst
= parm
->burst
;
472 police
->tcfp_mtu
= parm
->mtu
;
473 if (police
->tcfp_mtu
== 0) {
474 police
->tcfp_mtu
= ~0;
475 if (police
->tcfp_R_tab
)
476 police
->tcfp_mtu
= 255<<police
->tcfp_R_tab
->rate
.cell_log
;
478 if (police
->tcfp_P_tab
)
479 police
->tcfp_ptoks
= L2T_P(police
, police
->tcfp_mtu
);
480 PSCHED_GET_TIME(police
->tcfp_t_c
);
481 police
->tcf_index
= parm
->index
? parm
->index
:
482 tcf_police_new_index();
483 police
->tcf_action
= parm
->action
;
484 #ifdef CONFIG_NET_ESTIMATOR
486 gen_new_estimator(&police
->tcf_bstats
, &police
->tcf_rate_est
,
487 police
->tcf_stats_lock
, est
);
489 h
= tcf_hash(police
->tcf_index
, POL_TAB_MASK
);
490 write_lock_bh(&police_lock
);
491 police
->tcf_next
= tcf_police_ht
[h
];
492 tcf_police_ht
[h
] = &police
->common
;
493 write_unlock_bh(&police_lock
);
497 if (police
->tcfp_R_tab
)
498 qdisc_put_rtab(police
->tcfp_R_tab
);
503 int tcf_police(struct sk_buff
*skb
, struct tcf_police
*police
)
509 spin_lock(&police
->tcf_lock
);
511 police
->tcf_bstats
.bytes
+= skb
->len
;
512 police
->tcf_bstats
.packets
++;
514 #ifdef CONFIG_NET_ESTIMATOR
515 if (police
->tcfp_ewma_rate
&&
516 police
->tcf_rate_est
.bps
>= police
->tcfp_ewma_rate
) {
517 police
->tcf_qstats
.overlimits
++;
518 spin_unlock(&police
->tcf_lock
);
519 return police
->tcf_action
;
522 if (skb
->len
<= police
->tcfp_mtu
) {
523 if (police
->tcfp_R_tab
== NULL
) {
524 spin_unlock(&police
->tcf_lock
);
525 return police
->tcfp_result
;
528 PSCHED_GET_TIME(now
);
529 toks
= PSCHED_TDIFF_SAFE(now
, police
->tcfp_t_c
,
531 if (police
->tcfp_P_tab
) {
532 ptoks
= toks
+ police
->tcfp_ptoks
;
533 if (ptoks
> (long)L2T_P(police
, police
->tcfp_mtu
))
534 ptoks
= (long)L2T_P(police
, police
->tcfp_mtu
);
535 ptoks
-= L2T_P(police
, skb
->len
);
537 toks
+= police
->tcfp_toks
;
538 if (toks
> (long)police
->tcfp_burst
)
539 toks
= police
->tcfp_burst
;
540 toks
-= L2T(police
, skb
->len
);
541 if ((toks
|ptoks
) >= 0) {
542 police
->tcfp_t_c
= now
;
543 police
->tcfp_toks
= toks
;
544 police
->tcfp_ptoks
= ptoks
;
545 spin_unlock(&police
->tcf_lock
);
546 return police
->tcfp_result
;
550 police
->tcf_qstats
.overlimits
++;
551 spin_unlock(&police
->tcf_lock
);
552 return police
->tcf_action
;
554 EXPORT_SYMBOL(tcf_police
);
556 int tcf_police_dump(struct sk_buff
*skb
, struct tcf_police
*police
)
558 unsigned char *b
= skb
->tail
;
559 struct tc_police opt
;
561 opt
.index
= police
->tcf_index
;
562 opt
.action
= police
->tcf_action
;
563 opt
.mtu
= police
->tcfp_mtu
;
564 opt
.burst
= police
->tcfp_burst
;
565 if (police
->tcfp_R_tab
)
566 opt
.rate
= police
->tcfp_R_tab
->rate
;
568 memset(&opt
.rate
, 0, sizeof(opt
.rate
));
569 if (police
->tcfp_P_tab
)
570 opt
.peakrate
= police
->tcfp_P_tab
->rate
;
572 memset(&opt
.peakrate
, 0, sizeof(opt
.peakrate
));
573 RTA_PUT(skb
, TCA_POLICE_TBF
, sizeof(opt
), &opt
);
574 if (police
->tcfp_result
)
575 RTA_PUT(skb
, TCA_POLICE_RESULT
, sizeof(int),
576 &police
->tcfp_result
);
577 #ifdef CONFIG_NET_ESTIMATOR
578 if (police
->tcfp_ewma_rate
)
579 RTA_PUT(skb
, TCA_POLICE_AVRATE
, 4, &police
->tcfp_ewma_rate
);
584 skb_trim(skb
, b
- skb
->data
);
588 int tcf_police_dump_stats(struct sk_buff
*skb
, struct tcf_police
*police
)
592 if (gnet_stats_start_copy_compat(skb
, TCA_STATS2
, TCA_STATS
,
593 TCA_XSTATS
, police
->tcf_stats_lock
,
597 if (gnet_stats_copy_basic(&d
, &police
->tcf_bstats
) < 0 ||
598 #ifdef CONFIG_NET_ESTIMATOR
599 gnet_stats_copy_rate_est(&d
, &police
->tcf_rate_est
) < 0 ||
601 gnet_stats_copy_queue(&d
, &police
->tcf_qstats
) < 0)
604 if (gnet_stats_finish_copy(&d
) < 0)
613 #endif /* CONFIG_NET_CLS_ACT */