/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mqprio_sched {
	struct Qdisc **qdiscs;
	int hw_offload;
};
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt mqprio = {};

		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &mqprio);
	} else {
		netdev_set_num_tc(dev, 0);
	}
}
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}
	/* Limit qopt->hw to the maximum supported offload value.  Drivers
	 * have the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts.  If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
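
/* Illustrative example (not part of the original file): on a device with
 * real_num_tx_queues == 4, a request with num_tc = 2, count = {2, 2} and
 * offset = {0, 2} passes the checks above: tc 0 owns queues 0-1, tc 1 owns
 * queues 2-3, the ranges do not overlap, and last == real_num_tx_queues.
 * With offset = {0, 1} instead, tc 0's last queue (2) would exceed tc 1's
 * offset (1) and the overlap check would return -EINVAL.
 */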
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping, then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt mqprio = *qopt;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}
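
/* Illustrative usage (not part of the original file; the device name and
 * queue layout are assumptions): an iproute2 command that exercises this
 * init path on an 8-queue device, with three traffic classes, priorities
 * 4-5 mapped to tc 1 and 6-7 to tc 2, and no hardware offload:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *	map 0 0 0 0 1 1 2 2 0 0 0 0 0 0 0 0 \
 *	queues 4@0 2@4 2@6 hw 0
 */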
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}
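
/* Note on the class numbering used above (derived from this file, not a
 * comment in the original): minor class IDs 1..num_tc refer to the virtual
 * per-traffic-class nodes, while num_tc + 1 .. num_tc + num_tx_queues refer
 * to the per-queue leaves, hence the "cl - 1 - netdev_get_num_tc(dev)"
 * translation.  With num_tc == 3, cl == 4 selects tx queue 0.
 */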
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
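
/* Worked example for the parent lookup above (illustrative, assuming the
 * num_tc 3 / queues 4@0 2@4 2@6 setup shown earlier): queue class cl == 9
 * gives q_idx = 9 - 3 = 6, which falls in tc 1's range (offset 4, count 2:
 * 6 > 4 && 6 <= 6), so the reported parent is the tc 1 class, minor 2.
 */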
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);
		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");