/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
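/* mqprio is a classful root qdisc that partitions a multiqueue device's
 * transmit queues among traffic classes: skb priorities are mapped to a
 * traffic class, and each traffic class owns a contiguous range of tx
 * queues, with one child qdisc attached per tx queue.  The mapping can be
 * kept in software or handed to the driver via ndo_setup_tc.
 *
 * In struct mqprio_sched below, qdiscs holds the per-tx-queue children
 * (used only between init and attach) and hw_offload records the offload
 * mode the driver reported back.
 *
 * An illustrative (not prescriptive) user-space setup with three traffic
 * classes on a device with at least four tx queues might be:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 3 \
 *		map 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0 \
 *		queues 2@0 1@2 1@3 hw 0
 */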
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	int hw_offload;
};
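/* Tear down all per-queue child qdiscs, then either ask the driver to
 * remove its hardware mapping (when the qdisc was offloaded) or clear
 * the software traffic-class state on the device.
 */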
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt offload = { 0 };
		struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
					   { .mqprio = &offload } };

		dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, 0, &tc);
	} else {
		netdev_set_num_tc(dev, 0);
	}
}
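/* Validate the user-supplied tc_mqprio_qopt: num_tc must be in range and
 * every priority must map to a valid traffic class.  In the software
 * case, each class's queue range must also lie within real_num_tx_queues
 * and must not overlap the ranges of later classes; with hardware offload
 * the queue counts are left for the driver to populate or validate.
 */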
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to maximum supported offload value.  Drivers have
	 * the option of overriding this later if they don't support a given
	 * offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts.  If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; a range ending at
		 * exactly real_num_tx_queues indicates the last queue is in
		 * use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
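/* Create one default child qdisc per tx queue, then install the verified
 * traffic-class-to-queue mapping either in hardware (via ndo_setup_tc,
 * where the driver may adjust the offload mode it reports back) or in
 * software.  Priority-to-tc mappings are always applied in software.
 */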
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt offload = *qopt;
		struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
					   { .mqprio = &offload } };

		err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle,
						    0, 0, &tc);
		if (err)
			return err;

		priv->hw_offload = offload.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}
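/* Graft the qdiscs pre-allocated in mqprio_init() onto their tx queues,
 * destroying whatever was attached before, and free the temporary array;
 * from this point the children are reachable through the queues.
 */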
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
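/* Class IDs 1..num_tc stand for the traffic classes themselves; IDs above
 * that refer to individual tx queues.  Translate a queue class ID back to
 * its netdev_queue, or return NULL if the ID is out of range.
 */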
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}
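/* Replace the child qdisc of a tx-queue class.  The device is deactivated
 * around the swap so no packets are in flight while the queue's qdisc
 * pointer changes.
 */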
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
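/* Aggregate queue length and statistics from every child qdisc (taking
 * each child's lock while reading), then dump the current tc_mqprio_qopt
 * so user space sees the active mapping.
 */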
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}
static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}
static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}
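/* Report a class's place in the hierarchy: traffic-class classes hang off
 * the root, while tx-queue classes report the traffic class that owns
 * their queue as parent and their queue's qdisc handle as tcm_info.
 */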
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
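/* For a traffic-class class, sum the statistics of every tx queue in its
 * range; for a tx-queue class, copy the child qdisc's statistics
 * directly.
 */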
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we
		 * hold here is the lock on dev_queue->qdisc_sleeping,
		 * which is also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}
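/* Visit every virtual class: one per traffic class followed by one per
 * tx queue, honouring the walker's skip/stop protocol.
 */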
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};
static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");