// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct mqprio_sched {
        struct Qdisc **qdiscs;
        u16 mode;
        u16 shaper;
        int hw_offload;
        u32 flags;
        u64 min_rate[TC_QOPT_MAX_QUEUE];
        u64 max_rate[TC_QOPT_MAX_QUEUE];
};

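/* Release the per-queue child qdiscs and, if the configuration was
 * offloaded, ask the driver to tear it down; in software mode, clear
 * the device's traffic-class mapping instead.
 */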
static void mqprio_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        if (priv->qdiscs) {
                for (ntx = 0;
                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
                     ntx++)
                        qdisc_put(priv->qdiscs[ntx]);
                kfree(priv->qdiscs);
        }

        if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
                struct tc_mqprio_qopt_offload mqprio = { { 0 } };

                switch (priv->mode) {
                case TC_MQPRIO_MODE_DCB:
                case TC_MQPRIO_MODE_CHANNEL:
                        dev->netdev_ops->ndo_setup_tc(dev,
                                                      TC_SETUP_QDISC_MQPRIO,
                                                      &mqprio);
                        break;
                default:
                        return;
                }
        } else {
                netdev_set_num_tc(dev, 0);
        }
}

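/* Validate a user-supplied tc_mqprio_qopt: the traffic-class count, the
 * priority-to-tc map and, in the software case, the per-tc queue ranges.
 */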
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
        int i, j;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE)
                return -EINVAL;

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i < TC_BITMASK + 1; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc)
                        return -EINVAL;
        }

        /* Limit qopt->hw to the maximum supported offload value. Drivers
         * have the option of overriding this later if they don't support
         * a given offload type.
         */
        if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
                qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

        /* If hardware offload is requested, we will leave it to the device
         * to either populate the queue counts itself or to validate the
         * provided queue counts. If ndo_setup_tc is not present then
         * hardware doesn't support offload and we should return an error.
         */
        if (qopt->hw)
                return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is within the tx range; a 'last'
                 * equal to real_num_tx_queues means the final queue is in
                 * use.
                 */
                if (qopt->offset[i] >= dev->real_num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues)
                        return -EINVAL;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j])
                                return -EINVAL;
                }
        }

        return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
        [TCA_MQPRIO_MODE]       = { .len = sizeof(u16) },
        [TCA_MQPRIO_SHAPER]     = { .len = sizeof(u16) },
        [TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
        [TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
};

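/* mqprio's TCA_OPTIONS payload is a fixed struct tc_mqprio_qopt followed
 * by optional nested attributes; parse only the trailing attribute part.
 */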
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
                      const struct nla_policy *policy, int len)
{
        int nested_len = nla_len(nla) - NLA_ALIGN(len);

        if (nested_len >= nla_attr_size(0))
                return nla_parse_deprecated(tb, maxtype,
                                            nla_data(nla) + NLA_ALIGN(len),
                                            nested_len, policy, NULL);

        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
        return 0;
}

static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        int i, err = -EOPNOTSUPP;
        struct tc_mqprio_qopt *qopt = NULL;
        struct nlattr *tb[TCA_MQPRIO_MAX + 1];
        struct nlattr *attr;
        int rem;
        int len;

        BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
        BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* make certain we can allocate enough classids to handle queues */
        if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
                return -ENOMEM;

        if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;

        qopt = nla_data(opt);
        if (mqprio_parse_opt(dev, qopt))
                return -EINVAL;

        len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
        if (len > 0) {
                err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
                                 sizeof(*qopt));
                if (err < 0)
                        return err;

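                /* The nested attributes below are only meaningful when the
                 * configuration is offloaded to hardware.
                 */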
                if (!qopt->hw)
                        return -EINVAL;

                if (tb[TCA_MQPRIO_MODE]) {
                        priv->flags |= TC_MQPRIO_F_MODE;
                        priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
                }

                if (tb[TCA_MQPRIO_SHAPER]) {
                        priv->flags |= TC_MQPRIO_F_SHAPER;
                        priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
                }

                if (tb[TCA_MQPRIO_MIN_RATE64]) {
                        if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
                                return -EINVAL;
                        i = 0;
                        nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
                                            rem) {
                                if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
                                        return -EINVAL;
                                if (i >= qopt->num_tc)
                                        break;
                                priv->min_rate[i] = *(u64 *)nla_data(attr);
                                i++;
                        }
                        priv->flags |= TC_MQPRIO_F_MIN_RATE;
                }

                if (tb[TCA_MQPRIO_MAX_RATE64]) {
                        if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
                                return -EINVAL;
                        i = 0;
                        nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
                                            rem) {
                                if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
                                        return -EINVAL;
                                if (i >= qopt->num_tc)
                                        break;
                                priv->max_rate[i] = *(u64 *)nla_data(attr);
                                i++;
                        }
                        priv->flags |= TC_MQPRIO_F_MAX_RATE;
                }
        }

        /* pre-allocate qdisc, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

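        /* If anything below fails, qdisc_create() calls ->destroy(), so
         * mqprio_destroy() will release whatever was allocated so far.
         */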
        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)), extack);
                if (!qdisc)
                        return -ENOMEM;

                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        /* If the mqprio options indicate that hardware should own the queue
         * mapping, then run ndo_setup_tc; otherwise use the supplied and
         * verified mapping.
         */
        if (qopt->hw) {
                struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

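                /* In DCB mode only the default (DCB) shaper is accepted; in
                 * channel mode the extended mode/shaper/rate parameters are
                 * forwarded to the driver.
                 */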
                switch (priv->mode) {
                case TC_MQPRIO_MODE_DCB:
                        if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
                                return -EINVAL;
                        break;
                case TC_MQPRIO_MODE_CHANNEL:
                        mqprio.flags = priv->flags;
                        if (priv->flags & TC_MQPRIO_F_MODE)
                                mqprio.mode = priv->mode;
                        if (priv->flags & TC_MQPRIO_F_SHAPER)
                                mqprio.shaper = priv->shaper;
                        if (priv->flags & TC_MQPRIO_F_MIN_RATE)
                                for (i = 0; i < mqprio.qopt.num_tc; i++)
                                        mqprio.min_rate[i] = priv->min_rate[i];
                        if (priv->flags & TC_MQPRIO_F_MAX_RATE)
                                for (i = 0; i < mqprio.qopt.num_tc; i++)
                                        mqprio.max_rate[i] = priv->max_rate[i];
                        break;
                default:
                        return -EINVAL;
                }
                err = dev->netdev_ops->ndo_setup_tc(dev,
                                                    TC_SETUP_QDISC_MQPRIO,
                                                    &mqprio);
                if (err)
                        return err;

                priv->hw_offload = mqprio.qopt.hw;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            qopt->count[i], qopt->offset[i]);
        }

        /* Always use supplied priority mappings */
        for (i = 0; i < TC_BITMASK + 1; i++)
                netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

        sch->flags |= TCQ_F_MQROOT;
        return 0;
}

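/* Attach runs once init has succeeded: graft the pre-allocated children
 * onto their tx queues; the queues now own the references, so the
 * temporary array can be dropped.
 */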
static void mqprio_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_put(old);
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

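/* Classids 1..num_tx_queues map one-to-one onto device tx queues. */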
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                        struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

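        /* Quiesce the device while the child qdisc is being swapped. */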
        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);

        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}

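/* Emit the configured per-tc minimum/maximum rates as nested netlink
 * attributes, one u64 per traffic class.
 */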
static int dump_rates(struct mqprio_sched *priv,
                      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
        struct nlattr *nest;
        int i;

        if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
                nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
                if (!nest)
                        goto nla_put_failure;

                for (i = 0; i < opt->num_tc; i++) {
                        if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
                                    sizeof(priv->min_rate[i]),
                                    &priv->min_rate[i]))
                                goto nla_put_failure;
                }
                nla_nest_end(skb, nest);
        }

        if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
                nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
                if (!nest)
                        goto nla_put_failure;

                for (i = 0; i < opt->num_tc; i++) {
                        if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
                                    sizeof(priv->max_rate[i]),
                                    &priv->max_rate[i]))
                                goto nla_put_failure;
                }
                nla_nest_end(skb, nest);
        }
        return 0;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
        struct tc_mqprio_qopt opt = { 0 };
        struct Qdisc *qdisc;
        unsigned int ntx, tc;

        sch->q.qlen = 0;
        memset(&sch->bstats, 0, sizeof(sch->bstats));
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        /* MQ supports lockless qdiscs. However, statistics accounting needs
         * to account for all, none, or a mix of locked and unlocked child
         * qdiscs. Percpu stats are added to counters in-band and locking
         * qdisc totals are added at end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
                spin_lock_bh(qdisc_lock(qdisc));

                if (qdisc_is_percpu_stats(qdisc)) {
                        __u32 qlen = qdisc_qlen_sum(qdisc);

                        __gnet_stats_copy_basic(NULL, &sch->bstats,
                                                qdisc->cpu_bstats,
                                                &qdisc->bstats);
                        __gnet_stats_copy_queue(&sch->qstats,
                                                qdisc->cpu_qstats,
                                                &qdisc->qstats, qlen);
                        sch->q.qlen += qlen;
                } else {
                        sch->q.qlen += qdisc->q.qlen;
                        sch->bstats.bytes += qdisc->bstats.bytes;
                        sch->bstats.packets += qdisc->bstats.packets;
                        sch->qstats.backlog += qdisc->qstats.backlog;
                        sch->qstats.drops += qdisc->qstats.drops;
                        sch->qstats.requeues += qdisc->qstats.requeues;
                        sch->qstats.overlimits += qdisc->qstats.overlimits;
                }

                spin_unlock_bh(qdisc_lock(qdisc));
        }

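        /* Rebuild the dumped qopt from current device state, so the result
         * reflects any adjustments made by an offloading driver.
         */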
        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
        opt.hw = priv->hw_offload;

        for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
                opt.count[tc] = dev->tc_to_txq[tc].count;
                opt.offset[tc] = dev->tc_to_txq[tc].offset;
        }

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_MODE) &&
            nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
            nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
             priv->flags & TC_MQPRIO_F_MAX_RATE) &&
            (dump_rates(priv, &opt, skb) != 0))
                goto nla_put_failure;

        return nla_nest_end(skb, nla);

nla_put_failure:
        nlmsg_trim(skb, nla);
        return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx = TC_H_MIN(classid);

        /* There are essentially two regions here that have valid classid
         * values. The first region will have a classid value of 1 through
         * num_tx_queues. All of these are backed by actual Qdiscs.
         */
        if (ntx < TC_H_MIN_PRIORITY)
                return (ntx <= dev->num_tx_queues) ? ntx : 0;

        /* The second region represents the hardware traffic classes. These
         * are represented by classid values of TC_H_MIN_PRIORITY through
         * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
         */
        return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        if (cl < TC_H_MIN_PRIORITY) {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
                struct net_device *dev = qdisc_dev(sch);
                int tc = netdev_txq_to_tc(dev, cl - 1);

                tcm->tcm_parent = (tc < 0) ? 0 :
                        TC_H_MAKE(TC_H_MAJ(sch->handle),
                                  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
                tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
        } else {
                tcm->tcm_parent = TC_H_ROOT;
                tcm->tcm_info = 0;
        }
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        if (cl >= TC_H_MIN_PRIORITY) {
                int i;
                __u32 qlen = 0;
                struct gnet_stats_queue qstats = {0};
                struct gnet_stats_basic_packed bstats = {0};
                struct net_device *dev = qdisc_dev(sch);
                struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

                /* Drop the lock here; it will be reclaimed before touching
                 * statistics. This is required because the d->lock we hold
                 * here is the lock on dev_queue->qdisc_sleeping, which is
                 * also acquired below.
                 */
                if (d->lock)
                        spin_unlock_bh(d->lock);

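                /* Sum the children into the zero-initialized locals. The
                 * percpu copy helpers accumulate; for non-percpu children
                 * they overwrite, so mixed configurations are approximate.
                 */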
                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);
                        struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
                        struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
                        struct gnet_stats_queue __percpu *cpu_qstats = NULL;

                        spin_lock_bh(qdisc_lock(qdisc));
                        if (qdisc_is_percpu_stats(qdisc)) {
                                cpu_bstats = qdisc->cpu_bstats;
                                cpu_qstats = qdisc->cpu_qstats;
                        }

                        qlen += qdisc_qlen_sum(qdisc);
                        __gnet_stats_copy_basic(NULL, &bstats,
                                                cpu_bstats, &qdisc->bstats);
                        __gnet_stats_copy_queue(&qstats,
                                                cpu_qstats,
                                                &qdisc->qstats,
                                                qlen);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }

                /* Reclaim root sleeping lock before completing stats */
                if (d->lock)
                        spin_lock_bh(d->lock);
                if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                        return -1;
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
                                          sch->cpu_bstats, &sch->bstats) < 0 ||
                    qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        /* Walk hierarchy with a virtual class per tc */
        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
                if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }

        /* Pad the values and skip over unused traffic classes */
        if (ntx < TC_MAX_QUEUE) {
                arg->count = TC_MAX_QUEUE;
                ntx = TC_MAX_QUEUE;
        }

        /* Reset offset, sort out remaining per-queue qdiscs */
        for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }
}

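/* Used for grafting: resolve the requested parent classid to the tx
 * queue it denotes.
 */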
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
                                                struct tcmsg *tcm)
{
        return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
        .graft          = mqprio_graft,
        .leaf           = mqprio_leaf,
        .find           = mqprio_find,
        .walk           = mqprio_walk,
        .dump           = mqprio_dump_class,
        .dump_stats     = mqprio_dump_class_stats,
        .select_queue   = mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
        .cl_ops         = &mqprio_class_ops,
        .id             = "mqprio",
        .priv_size      = sizeof(struct mqprio_sched),
        .init           = mqprio_init,
        .destroy        = mqprio_destroy,
        .attach         = mqprio_attach,
        .dump           = mqprio_dump,
        .owner          = THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
        return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
        unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");