/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 */
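
/*
 * Illustrative userspace setup (assumed typical example, not mandated by
 * this file): mark traffic with netfilter, then map the mark to a class:
 *
 *	iptables -t mangle -A PREROUTING -p tcp --dport 80 -j MARK --set-mark 6
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 6 fw classid 1:6
 *
 * Packets carrying mark 6 are then classified into class 1:6.
 */
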
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

#define HTSIZE 256

struct fw_head {
	u32			mask;
	struct fw_filter __rcu	*ht[HTSIZE];
	struct rcu_head		rcu;
};

struct fw_filter {
	struct fw_filter __rcu	*next;
	u32			id;
	struct tcf_result	res;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif /* CONFIG_NET_CLS_IND */
	struct tcf_exts		exts;
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
};
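
/*
 * Fold the 32-bit (masked) mark into an 8-bit bucket index.  Worked example
 * (assumed input): 0x00010203 ^ (>>16) = 0x00010202, ^ (>>8) = 0x00010300,
 * % 256 = bucket 0.
 */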
static u32 fw_hash(u32 handle)
{
	handle ^= (handle >> 16);
	handle ^= (handle >> 8);
	return handle % HTSIZE;
}
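
/*
 * Fast path: look up the (masked) skb->mark in the hash table and run the
 * attached extensions/actions.  Readers use the BH flavour of RCU
 * (rcu_dereference_bh()), matching the packet-processing context.  If no
 * head has been allocated yet, fall back to the legacy behaviour of
 * treating the mark itself as a class ID on this qdisc.
 */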
static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
#endif /* CONFIG_NET_CLS_IND */
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		/* old method */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;

	if (head == NULL)
		return 0;

	f = rtnl_dereference(head->ht[fw_hash(handle)]);
	for (; f; f = rtnl_dereference(f->next)) {
		if (f->id == handle)
			return (unsigned long)f;
	}
	return 0;
}

static int fw_init(struct tcf_proto *tp)
{
	return 0;
}
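
/* RCU callback: free a filter once no reader can still hold a reference. */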
static void fw_delete_filter(struct rcu_head *head)
{
	struct fw_filter *f = container_of(head, struct fw_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
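
/*
 * Tear down the classifier under RTNL: unlink every filter from every
 * bucket, unbind it from its class, and defer the frees to RCU.
 */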
static void fw_destroy(struct tcf_proto *tp)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, fw_delete_filter);
		}
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
}
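
/*
 * Unlink a single filter from its bucket by walking the chain with a
 * pointer to the previous link, then let fw_delete_filter() free it after
 * an RCU grace period.
 */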
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = (struct fw_filter *)arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, fw_delete_filter);
			return 0;
		}
	}
out:
	return -EINVAL;
}

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};
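
/*
 * Apply netlink attributes to a filter: validate the extensions, bind
 * TCA_FW_CLASSID, resolve TCA_FW_INDEV, and reject a TCA_FW_MASK that does
 * not match the mask fixed when the head was created.
 */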
static int
fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
		struct nlattr **tb, struct nlattr **tca, unsigned long base,
		bool ovr)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct tcf_exts e;
	u32 mask;
	int err;

	tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		f->ifindex = ret;
	}
#endif /* CONFIG_NET_CLS_IND */

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			goto errout;
	} else if (head->mask != 0xFFFFFFFF)
		goto errout;

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
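
/*
 * Create or replace a filter under RTNL.  Replacement is copy-on-update:
 * allocate a new fw_filter, copy the old identity, apply the new
 * attributes, publish it with rcu_assign_pointer() and free the old one
 * via call_rcu(), so concurrent classifiers never see a half-updated
 * entry.  The first filter added also allocates the head and fixes the
 * mark mask for this classifier instance.
 */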
static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle,
		     struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = (struct fw_filter *) *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
	if (err < 0)
		return err;

	if (f) {
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->res = f->res;
#ifdef CONFIG_NET_CLS_IND
		fnew->ifindex = f->ifindex;
#endif /* CONFIG_NET_CLS_IND */
		fnew->tp = f->tp;

		tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);

		err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		call_rcu(&f->rcu, fw_delete_filter);

		*arg = (unsigned long)fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (head == NULL) {
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
	f->id = handle;
	f->tp = tp;

	err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
	if (err < 0)
		goto errout;

	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
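
/*
 * Iterate over all filters for dump operations, honouring the walker's
 * skip/count bookkeeping and stopping early when the callback asks to.
 */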
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(tp, (unsigned long)f, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
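
/*
 * Dump one filter as a nested TCA_OPTIONS block: class ID, optional
 * ingress device name, a non-default mask, and the attached extensions.
 */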
static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = (struct fw_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_is_available(&f->exts))
		return skb->len;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
#endif /* CONFIG_NET_CLS_IND */
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		=	"fw",
	.classify	=	fw_classify,
	.init		=	fw_init,
	.destroy	=	fw_destroy,
	.get		=	fw_get,
	.change		=	fw_change,
	.delete		=	fw_delete,
	.walk		=	fw_walk,
	.dump		=	fw_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");