/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * 		J Hadi Salim (action changes)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/act_api.h>
#include <net/netlink.h>

#define L2T(p,L)	qdisc_l2t((p)->tcfp_R_tab, L)
#define L2T_P(p,L)	qdisc_l2t((p)->tcfp_P_tab, L)

#define POL_TAB_MASK	15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
static u32 police_idx_gen;
static DEFINE_RWLOCK(police_lock);

static struct tcf_hashinfo police_hash_info = {
	.htab	=	tcf_police_ht,
	.hmask	=	POL_TAB_MASK,
	.lock	=	&police_lock,
};

/* old policer structure from before tc actions */
struct tc_police_compat
{
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};

/* Each policer is serialized by its individual spinlock */

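/* Walk the policer hash table and dump every action into the netlink
 * message, resuming from cb->args[0] so an interrupted dump can continue;
 * cb->args[0] is advanced by the number of entries dumped. */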
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
			      int type, struct tc_action *a)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(&police_lock);

	s_i = cb->args[0];

	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = index;
			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nla_nest_cancel(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
		}
	}
done:
	read_unlock_bh(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

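/* Unlink the policer from its hash chain under police_lock, kill its rate
 * estimator, release its rate tables and free the structure. */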
static void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
};

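/* Look up an existing policer by parm->index or allocate a new one, then
 * apply the configuration carried in the TCA_POLICE_* attributes. */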
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
				 struct tc_action *a, int ovr, int bind)
{
	unsigned h;
	int ret = 0, err;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	int size;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;
	parm = nla_data(tb[TCA_POLICE_TBF]);

	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_hash_lookup(parm->index, &police_hash_info);
		if (pc != NULL) {
			a->priv = pc;
			police = to_police(pc);
			if (bind) {
				police->tcf_bindcnt += 1;
				police->tcf_refcnt += 1;
			}
			if (ovr)
				goto override;
			return ret;
		}
	}

	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (police == NULL)
		return -ENOMEM;
	ret = ACT_P_CREATED;
	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	if (bind)
		police->tcf_bindcnt = 1;
override:
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE]);
			if (P_tab == NULL)
				goto failure;
		}
	}

	spin_lock_bh(&police->tcf_lock);
	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock, est);
		if (err)
			goto failure_unlock;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_bstats,
					  &police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure_unlock;
	}

	/* No failure allowed after this point */
	if (R_tab != NULL) {
		qdisc_put_rtab(police->tcfp_R_tab);
		police->tcfp_R_tab = R_tab;
	}
	if (P_tab != NULL) {
		qdisc_put_rtab(police->tcfp_P_tab);
		police->tcfp_P_tab = P_tab;
	}

	if (tb[TCA_POLICE_RESULT])
		police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	police->tcf_action = parm->action;

	if (tb[TCA_POLICE_AVRATE])
		police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	spin_unlock_bh(&police->tcf_lock);
	if (ret != ACT_P_CREATED)
		return ret;

	police->tcfp_t_c = psched_get_time();
	police->tcf_index = parm->index ? parm->index :
		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);

	a->priv = police;
	return ret;

failure_unlock:
	spin_unlock_bh(&police->tcf_lock);
failure:
	if (P_tab)
		qdisc_put_rtab(P_tab);
	if (R_tab)
		qdisc_put_rtab(R_tab);
	if (ret == ACT_P_CREATED)
		kfree(police);
	return err;
}

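/* Drop a reference (and a binding, when requested); destroy the policer
 * once the last reference and binding are gone. */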
static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
	struct tcf_police *p = a->priv;
	int ret = 0;

	if (p != NULL) {
		if (bind)
			p->tcf_bindcnt--;

		p->tcf_refcnt--;
		if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
			tcf_police_destroy(p);
			ret = 1;
		}
	}
	return ret;
}

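/* Per-packet policing: account the packet, then check the EWMA rate, the
 * configured MTU and the token bucket. Conforming packets return
 * tcfp_result; everything else counts as an overlimit and returns
 * tcf_action. */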
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = a->priv;
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
	police->tcf_bstats.packets++;

	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}

	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
					    police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, qdisc_pkt_len(skb));
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, qdisc_pkt_len(skb));
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}

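/* Serialize the current policer configuration back to userspace as
 * TCA_POLICE_* attributes. */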
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = a->priv;
	struct tc_police opt;

	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	opt.refcnt = police->tcf_refcnt - ref;
	opt.bindcnt = police->tcf_bindcnt - bind;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
	if (police->tcfp_ewma_rate)
		NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.hinfo		=	&police_hash_info,
	.type		=	TCA_ID_POLICE,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_act_police,
	.dump		=	tcf_act_police_dump,
	.cleanup	=	tcf_act_police_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_act_police_locate,
	.walk		=	tcf_act_police_walker
};

static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);