// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
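
/* Per-instance private data. qdiscs holds one pre-allocated child qdisc per
 * TX queue until attach; mode, shaper, flags and the rate arrays mirror the
 * optional netlink attributes accepted at init time.
 */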
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16			mode;
	u16			shaper;
	int			hw_offload;
	u32			flags;
	u64			min_rate[TC_QOPT_MAX_QUEUE];
	u64			max_rate[TC_QOPT_MAX_QUEUE];
};
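
/* Teardown: release every per-queue child, then undo the traffic-class
 * mapping, via ndo_setup_tc() if the configuration was offloaded, or
 * netdev_set_num_tc(dev, 0) for the software case.
 */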
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}
	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}
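
/* Validate the fixed tc_mqprio_qopt from user space: num_tc bounds, the
 * priority-to-tc map, and, unless setup is delegated to hardware, that the
 * per-tc queue ranges fit in real_num_tx_queues without overlapping.
 */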
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a given
	 * offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
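
/* Policy for the netlink attributes that may trail the fixed options blob */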
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};
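
/* TCA_OPTIONS carries a legacy struct tc_mqprio_qopt first; any netlink
 * attributes follow the aligned struct. Parse what remains, or zero the
 * table when nothing does.
 */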
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
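
/* Example setup from user space (device name illustrative):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 0 0 0 0 1 1 2 2 1 1 1 1 1 1 1 1 \
 *      queues 4@0 2@4 2@6 hw 0
 *
 * arrives here as a struct tc_mqprio_qopt optionally followed by mode,
 * shaper and rate attributes.
 */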
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		err = -EINVAL;
		if (!qopt->hw)
			return err;
		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}
	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}
	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc otherwise use the
	 * supplied and verified mapping
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}
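
/* Graft the pre-allocated children onto their TX queues once the root is
 * installed; only qdiscs backing currently active queues are hashed so
 * they are visible in dumps.
 */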
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
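
/* Per-queue class ids are 1-based: class 1 maps to TX queue 0, and so on up
 * to num_tx_queues.
 */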
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}
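
/* Swap the child qdisc on one TX queue, quiescing the device around the
 * graft while it is up.
 */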
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
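
/* Emit the configured per-tc rates as nested TCA_MQPRIO_{MIN,MAX}_RATE64
 * attributes, one u64 per traffic class.
 */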
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
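
/* Fold every child's counters into the root's stats before dumping
 * TCA_OPTIONS: per-CPU (lockless) children go through the __gnet_stats
 * helpers, legacy children are summed field by field.
 */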
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			__u32 qlen = qdisc_qlen_sum(qdisc);

			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen		+= qlen;
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}
	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}
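
/* Queue classes report their traffic class as parent and their child
 * qdisc's handle as info; traffic-class classes hang directly off the root.
 */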
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
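
/* For a traffic-class class, sum the stats of every TX queue in that tc.
 * As the comment below notes, d->lock is the same lock taken on the
 * children inside the loop, so it is dropped up front and re-acquired
 * before the final copies.
 */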
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop lock here it will be reclaimed before touching
		 * statistics this is required because the d->lock we
		 * hold here is the lock on dev_queue->qdisc_sleeping
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
			struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
			struct gnet_stats_queue __percpu *cpu_qstats = NULL;

			spin_lock_bh(qdisc_lock(qdisc));
			if (qdisc_is_percpu_stats(qdisc)) {
				cpu_bstats = qdisc->cpu_bstats;
				cpu_qstats = qdisc->cpu_qstats;
			}

			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						cpu_bstats, &qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						cpu_qstats,
						&qdisc->qstats,
						qlen);
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
					  sch->cpu_bstats, &sch->bstats) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};
static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");