/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>
/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
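/*
 * Illustrative example (values chosen for illustration only): with
 * indices = 4, classid x:6 stores tc_index 6, which dequeue folds to
 * entry [6 & (4 - 1)] = [2]; only classids x:0..x:3 map one-to-one.
 */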
#define NO_DEFAULT_INDEX	(1 << 16)

struct mask_value {
	u8			mask;
	u8			value;
};

struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto __rcu	*filter_list;
	struct mask_value	*mv;
	u16			indices;
	u8			set_tc_index;
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return index <= p->indices && index > 0;
}
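/*
 * Note: internal class handles are the mv[] index plus one, so that
 * handle 0 can mean "no class"; hence the valid range 1..indices above.
 */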
/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}
static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->q;
}

static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return dsmark_get(sch, classid);
}

static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}
static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	p->mv[arg - 1].mask = 0xff;
	p->mv[arg - 1].value = 0;

	return 0;
}
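/*
 * "Deleting" a class cannot free anything because every entry lives in
 * the fixed mv[] table; it merely restores the identity transform
 * (mask 0xff, value 0), which dsmark_walk below treats as an unused slot.
 */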
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}
static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
						       unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return &p->filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			if (skb_cow_head(skb, sizeof(struct iphdr)))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tc_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
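/*
 * Note that enqueue only records the classification result in
 * skb->tc_index; the actual DS field rewrite is deferred to dequeue,
 * after the inner qdisc has had its chance to reorder or drop.
 */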
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(tc_skb_protocol(skb)));
		break;
	}

	return skb;
}
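/*
 * The rewrite applied above by ipv{4,6}_change_dsfield() is
 * new_ds = (old_ds & mask) | value. For example (illustrative values),
 * mask 0x03 with value 0xb8 keeps the two ECN bits and sets the DSCP
 * to 46 (EF, shifted into the upper six bits of the DS field).
 */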
static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[TCA_DSMARK_INDICES])	/* mandatory; avoid NULL dereference */
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)	/* must be a power of two */
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}
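/*
 * Illustrative configuration (device and handle names are examples
 * only, not part of this file):
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 \
 *		default_index 0 set_tc_index
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * This allocates 64 (mask, value) entries and rewrites packets that
 * end up in entry [1] to DSCP EF while preserving their ECN bits.
 */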
static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	qdisc_reset(p->q);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_destroy_chain(&p->filter_list);
	qdisc_destroy(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld)\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.get		=	dsmark_get,
	.put		=	dsmark_put,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_chain	=	dsmark_find_tcf,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_put,
	.dump		=	dsmark_dump_class,
};
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");