/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>
/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
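
/*
 * Illustrative iproute2 usage (a sketch only; the device name and the
 * numeric values below are assumptions for illustration, not part of
 * this file):
 *
 *   tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 set_tc_index
 *   tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * The first command attaches a dsmark qdisc with 64 mask/value entries
 * and asks it to copy the DS field into skb->tc_index on enqueue.  The
 * second programs entry [1] (classid minor y maps to entry [y], see the
 * table above) so that on dequeue the DS byte becomes
 * (old & 0x3) | 0xb8, i.e. the ECN bits are preserved and the DSCP is
 * set to EF.
 */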

#define NO_DEFAULT_INDEX	(1 << 16)

struct mask_value {
	u8			mask;
	u8			value;
};

struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto __rcu	*filter_list;
	struct mask_value	*mv;
	u16			indices;
	u8			set_tc_index;
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};

static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	/* Class handles are 1-based: 0 means "no class", so the valid
	 * range is 1..indices, addressing entry [index - 1] in p->mv.
	 */
	return index <= p->indices && index > 0;
}

/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->q;
}

static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return dsmark_get(sch, classid);
}

static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};

static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}

static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	/* "Deleting" a class resets its entry to the identity marking. */
	p->mv[arg - 1].mask = 0xff;
	p->mv[arg - 1].value = 0;

	return 0;
}

static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}

static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
						       unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return &p->filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */
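
/*
 * Datapath summary (descriptive, derived from the code below): on enqueue,
 * set_tc_index first copies the DS field of IPv4/IPv6 packets into
 * skb->tc_index with the ECN bits masked off; then a tc filter result,
 * a matching skb->priority, or the configured default_index selects the
 * final tc_index.  The actual remarking happens on dequeue: tc_index
 * (masked to the table size) picks a mask/value pair and the DS field is
 * rewritten as (old & mask) | value.
 */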

static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			if (skb_cow_head(skb, sizeof(struct iphdr)))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tc_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(tc_skb_protocol(skb)));
		break;
	}

	return skb;
}

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}

static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	/* TCA_DSMARK_INDICES is mandatory; reject before dereferencing it */
	if (!tb[TCA_DSMARK_INDICES])
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)	/* must be a non-zero power of two */
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}

static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	qdisc_reset(p->q);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_destroy_chain(&p->filter_list);
	qdisc_destroy(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}

static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld)\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.get		=	dsmark_get,
	.put		=	dsmark_put,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_chain	=	dsmark_find_tcf,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_put,
	.dump		=	dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");