/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
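
/*
 * L2T() and L2T_P() translate a packet length L into transmission time on
 * the committed (R_tab) and peak (P_tab) rate tables: the length is shifted
 * down by cell_log and used as an index into the precomputed qdisc rate
 * table, the same scheme used by the token bucket qdisc.
 */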
#define L2T(p,L)   ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])

static u32 idx_gen;
static struct tcf_police *tcf_police_ht[16];
/* Policer hash table lock */
static rwlock_t police_lock = RW_LOCK_UNLOCKED;

/* Each policer is serialized by its individual spinlock */
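
/*
 * Policers live in a 16-bucket hash table keyed by the low four bits of
 * their index; lookup walks the bucket chain under the read side of
 * police_lock.
 */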
static __inline__ unsigned tcf_police_hash(u32 index)
{
        return index&0xF;
}

static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
{
        struct tcf_police *p;

        read_lock(&police_lock);
        for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
                if (p->index == index)
                        break;
        }
        read_unlock(&police_lock);
        return p;
}
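
/*
 * Pick the next unused policer index.  Index 0 means "no index", so the
 * generator wraps from 0 back to 1 and skips any index that is still in
 * the hash table.
 */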
static __inline__ u32 tcf_police_new_index(void)
{
        do {
                if (++idx_gen == 0)
                        idx_gen = 1;
        } while (tcf_police_lookup(idx_gen));

        return idx_gen;
}
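
/*
 * Unlink a policer from the hash table and free it, releasing its rate
 * estimator and rate tables on the way out.  Reference counting is left
 * to the callers; this should only run once the last reference is gone.
 */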
void tcf_police_destroy(struct tcf_police *p)
{
        unsigned h = tcf_police_hash(p->index);
        struct tcf_police **p1p;

        for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
                if (*p1p == p) {
                        write_lock_bh(&police_lock);
                        *p1p = p->next;
                        write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
                        qdisc_kill_estimator(&p->stats);
#endif
                        if (p->R_tab)
                                qdisc_put_rtab(p->R_tab);
                        if (p->P_tab)
                                qdisc_put_rtab(p->P_tab);
                        kfree(p);
                        return;
                }
        }
        BUG_TRAP(0);
}
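
/*
 * Find or create a policer from a TCA_POLICE_* attribute block.  If the
 * request names an existing index, that policer is reused and its refcount
 * bumped; otherwise a new one is allocated, its rate tables and token
 * state are initialized from struct tc_police, and it is linked into the
 * hash table.
 */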
struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
        unsigned h;
        struct tcf_police *p;
        struct rtattr *tb[TCA_POLICE_MAX];
        struct tc_police *parm;

        if (rtattr_parse(tb, TCA_POLICE_MAX, RTA_DATA(rta), RTA_PAYLOAD(rta)) < 0)
                return NULL;

        if (tb[TCA_POLICE_TBF-1] == NULL)
                return NULL;

        parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

        if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
                p->refcnt++;
                return p;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return NULL;

        memset(p, 0, sizeof(*p));
        p->refcnt = 1;
        spin_lock_init(&p->lock);
        p->stats.lock = &p->lock;
        if (parm->rate.rate) {
                if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
                        goto failure;
                if (parm->peakrate.rate &&
                    (p->P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE-1])) == NULL)
                        goto failure;
        }
        if (tb[TCA_POLICE_RESULT-1])
                p->result = *(int*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
#ifdef CONFIG_NET_ESTIMATOR
        if (tb[TCA_POLICE_AVRATE-1])
                p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
#endif
        p->toks = p->burst = parm->burst;
        p->mtu = parm->mtu;
        if (p->mtu == 0) {
                p->mtu = ~0;
                if (p->R_tab)
                        p->mtu = 255<<p->R_tab->rate.cell_log;
        }
        if (p->P_tab)
                p->ptoks = L2T_P(p, p->mtu);
        PSCHED_GET_TIME(p->t_c);
        p->index = parm->index ? : tcf_police_new_index();
        p->action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
        if (est)
                qdisc_new_estimator(&p->stats, est);
#endif
        h = tcf_police_hash(p->index);
        write_lock_bh(&police_lock);
        p->next = tcf_police_ht[h];
        tcf_police_ht[h] = p;
        write_unlock_bh(&police_lock);
        return p;

failure:
        if (p->R_tab)
                qdisc_put_rtab(p->R_tab);
        kfree(p);
        return NULL;
}
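
/*
 * The policing decision itself: a (dual) token bucket run in time units.
 * toks accumulates credit for the committed rate, ptoks for the optional
 * peak rate; a packet conforms only if both buckets stay non-negative
 * after being charged its transmission time, in which case the configured
 * result is returned and the bucket state is committed.  Non-conforming
 * or oversized packets count as overlimits and get the configured action.
 */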
int tcf_police(struct sk_buff *skb, struct tcf_police *p)
{
        psched_time_t now;
        long toks;
        long ptoks = 0;

        spin_lock(&p->lock);

        p->stats.bytes += skb->len;
        p->stats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
        if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
                p->stats.overlimits++;
                spin_unlock(&p->lock);
                return p->action;
        }
#endif

        if (skb->len <= p->mtu) {
                if (p->R_tab == NULL) {
                        spin_unlock(&p->lock);
                        return p->result;
                }

                PSCHED_GET_TIME(now);

                toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst, 0);

                if (p->P_tab) {
                        ptoks = toks + p->ptoks;
                        if (ptoks > (long)L2T_P(p, p->mtu))
                                ptoks = (long)L2T_P(p, p->mtu);
                        ptoks -= L2T_P(p, skb->len);
                }
                toks += p->toks;
                if (toks > (long)p->burst)
                        toks = p->burst;
                toks -= L2T(p, skb->len);

                if ((toks|ptoks) >= 0) {
                        p->t_c = now;
                        p->toks = toks;
                        p->ptoks = ptoks;
                        spin_unlock(&p->lock);
                        return p->result;
                }
        }

        p->stats.overlimits++;
        spin_unlock(&p->lock);
        return p->action;
}
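
/*
 * Usage sketch: the call sites live in the classifiers rather than here.
 * With CONFIG_NET_CLS_POLICE, a filter that has matched typically runs its
 * attached policer along the lines of
 *
 *	if (f->police)
 *		return tcf_police(skb, f->police);
 *
 * and the caller then acts on the TC_POLICE_* verdict from
 * <linux/pkt_cls.h>.  See cls_u32.c and cls_fw.c for the real call sites.
 */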

#ifdef CONFIG_RTNETLINK
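/*
 * Fill a netlink dump with the policer's configuration: the tc_police
 * blob itself plus the optional result and average-rate attributes.
 * On overflow the partly written attributes are trimmed back off the
 * skb and -1 is returned.
 */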
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
{
        unsigned char *b = skb->tail;
        struct tc_police opt;

        opt.index = p->index;
        opt.action = p->action;
        opt.mtu = p->mtu;
        opt.burst = p->burst;
        if (p->R_tab)
                opt.rate = p->R_tab->rate;
        else
                memset(&opt.rate, 0, sizeof(opt.rate));
        if (p->P_tab)
                opt.peakrate = p->P_tab->rate;
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
        if (p->result)
                RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
#ifdef CONFIG_NET_ESTIMATOR
        if (p->ewma_rate)
                RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
#endif
        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}
#endif