/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

#include <trace/events/qdisc.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
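/* Editor's illustrative sketch (not part of the original source): under the
 * rules above, a hypothetical caller that feeds a packet to a root qdisc
 * would roughly do
 *
 *	spin_lock(qdisc_lock(q));
 *	rc = q->enqueue(skb, q, &to_free);
 *	spin_unlock(qdisc_lock(q));
 *
 * while any change to the qdisc tree itself (grafting, deletion) is assumed
 * to run under rtnl_lock()/ASSERT_RTNL().
 */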
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
        const struct netdev_queue *txq = q->dev_queue;
        spinlock_t *lock = NULL;
        struct sk_buff *skb;

        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        skb = skb_peek(&q->skb_bad_txq);

        /* check the reason of requeuing without tx lock first */
        txq = skb_get_tx_queue(txq->dev, skb);
        if (!netif_xmit_frozen_or_stopped(txq)) {
                skb = __skb_dequeue(&q->skb_bad_txq);
                if (qdisc_is_percpu_stats(q)) {
                        qdisc_qstats_cpu_backlog_dec(q, skb);
                        qdisc_qstats_cpu_qlen_dec(q);
                } else {
                        qdisc_qstats_backlog_dec(q, skb);
                }
        }
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
        struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

        if (unlikely(skb))
                skb = __skb_dequeue_bad_txq(q);

        return skb;
}
static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
                                             struct sk_buff *skb)
{
        spinlock_t *lock = NULL;

        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        __skb_queue_tail(&q->skb_bad_txq, skb);
static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        struct sk_buff *next = skb->next;

        __skb_queue_tail(&q->gso_skb, skb);
        q->qstats.requeues++;
        qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
{
        spinlock_t *lock = qdisc_lock(q);
        struct sk_buff *next = skb->next;

        __skb_queue_tail(&q->gso_skb, skb);

        qdisc_qstats_cpu_requeues_inc(q);
        qdisc_qstats_cpu_backlog_inc(q, skb);
        qdisc_qstats_cpu_qlen_inc(q);
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        if (q->flags & TCQ_F_NOLOCK)
                return dev_requeue_skb_locked(skb, q);

        return __dev_requeue_skb(skb, q);
}
static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                bytelimit -= nskb->len; /* covers GSO len */
                (*packets)++; /* GSO counts as one pkt */
        }
/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
                                      struct sk_buff *skb,
                                      int *packets)
{
        int mapping = skb_get_queue_mapping(skb);
        struct sk_buff *nskb;

        nskb = q->dequeue(q);
        if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
                qdisc_enqueue_skb_bad_txq(q, nskb);

                if (qdisc_is_percpu_stats(q)) {
                        qdisc_qstats_cpu_backlog_inc(q, nskb);
                        qdisc_qstats_cpu_qlen_inc(q);
                } else {
                        qdisc_qstats_backlog_inc(q, nskb);
                }
        }
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        const struct netdev_queue *txq = q->dev_queue;
        struct sk_buff *skb = NULL;

        if (unlikely(!skb_queue_empty(&q->gso_skb))) {
                spinlock_t *lock = NULL;

                if (q->flags & TCQ_F_NOLOCK) {
                        lock = qdisc_lock(q);
                        spin_lock(lock);
                }

                skb = skb_peek(&q->gso_skb);

                /* skb may be null if another cpu pulls gso_skb off in between
                 * empty check and lock.
                 */

                /* skb in gso_skb were already validated */
                *validate = false;
                if (xfrm_offload(skb))
                        *validate = true;

                /* check the reason of requeuing without tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        skb = __skb_dequeue(&q->gso_skb);
                        if (qdisc_is_percpu_stats(q)) {
                                qdisc_qstats_cpu_backlog_dec(q, skb);
                                qdisc_qstats_cpu_qlen_dec(q);
                        } else {
                                qdisc_qstats_backlog_dec(q, skb);
                        }
                }
        }

        if ((q->flags & TCQ_F_ONETXQUEUE) &&
            netif_xmit_frozen_or_stopped(txq))
                return skb;

        skb = qdisc_dequeue_skb_bad_txq(q);

        if (qdisc_may_bulk(q))
                try_bulk_dequeue_skb(q, skb, txq, packets);
        else
                try_bulk_dequeue_skb_slow(q, skb, packets);

        trace_qdisc_dequeue(q, txq, *packets, skb);

        return skb;
}
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning running seqcount bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	false - hardware queue frozen backoff
 *	true  - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     struct net_device *dev, struct netdev_queue *txq,
                     spinlock_t *root_lock, bool validate)
{
        bool again = false;
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        if (root_lock)
                spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
        if (unlikely(again)) {
                if (root_lock)
                        spin_lock(root_lock);

                dev_requeue_skb(skb, q);
                return false;
        }
#endif

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                if (root_lock)
                        spin_lock(root_lock);
                return true;
        }

        if (root_lock)
                spin_lock(root_lock);

        if (!dev_xmit_complete(ret)) {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                dev_requeue_skb(skb, q);
                return false;
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                return false;

        return true;
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	false - queue is empty or throttled.
 *	true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
        spinlock_t *root_lock = NULL;
        struct netdev_queue *txq;
        struct net_device *dev;
        struct sk_buff *skb;
        bool validate;

        skb = dequeue_skb(q, &validate, packets);

        if (!(q->flags & TCQ_F_NOLOCK))
                root_lock = qdisc_lock(q);

        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
void __qdisc_run(struct Qdisc *q)
{
        int quota = dev_tx_weight;
        int packets;

        while (qdisc_restart(q, &packets)) {
                /*
                 * Ordered by possible occurrence: Postpone processing if
                 * 1. we've exceeded packet quota
                 * 2. another process needs the CPU;
                 */
                quota -= packets;
                if (quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }
}
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res;
        unsigned int i;

        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
        else if (netif_is_macvlan(dev))
                dev = macvlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }

        return res;
}
EXPORT_SYMBOL(dev_trans_start);
static void dev_watchdog(struct timer_list *t)
{
        struct net_device *dev = from_timer(dev, t, watchdog_timer);

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                trans_start = txq->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        txq->trans_timeout++;
                                }
                        }

                        if (some_queue_timedout) {
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);
void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}
static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}
/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_up_count);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);
/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_down_count);
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                        struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        return NET_XMIT_CN;
}
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .peek		=	noop_dequeue,
        .owner		=	THIS_MODULE,
};
static struct netdev_queue noop_netdev_queue = {
        .qdisc		=	&noop_qdisc,
        .qdisc_sleeping	=	&noop_qdisc,
};
struct Qdisc noop_qdisc = {
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .flags		=	TCQ_F_BUILTIN,
        .ops		=	&noop_qdisc_ops,
        .q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue	=	&noop_netdev_queue,
        .running	=	SEQCNT_ZERO(noop_qdisc.running),
        .busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
         * if this is NULL - so clear it here. */
        qdisc->enqueue = NULL;
        return 0;
}
struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .init		=	noqueue_init,
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .peek		=	noop_dequeue,
        .owner		=	THIS_MODULE,
};
static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
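/* Editor's note (not in the original source): the priomap above is indexed
 * by the low four bits of skb->priority (the TC_PRIO_* values) and yields a
 * band number, where band 0 is serviced first.  For instance,
 * TC_PRIO_INTERACTIVE (6) and TC_PRIO_CONTROL (7) map to band 0,
 * TC_PRIO_BESTEFFORT (0) to band 1, and TC_PRIO_BULK (2) to band 2.
 */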
/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3
/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
        struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
                                          int band)
{
        return &priv->q[band];
}
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                              struct sk_buff **to_free)
{
        int band = prio2band[skb->priority & TC_PRIO_MAX];
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct skb_array *q = band2list(priv, band);
        int err;

        err = skb_array_produce(q, skb);

        if (unlikely(err))
                return qdisc_drop_cpu(skb, qdisc, to_free);

        qdisc_qstats_cpu_qlen_inc(qdisc);
        qdisc_qstats_cpu_backlog_inc(qdisc, skb);
        return NET_XMIT_SUCCESS;
}
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff *skb = NULL;
        int band;

        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
                struct skb_array *q = band2list(priv, band);

                if (__skb_array_empty(q))
                        continue;

                skb = skb_array_consume_bh(q);
        }
        if (likely(skb)) {
                qdisc_qstats_cpu_backlog_dec(qdisc, skb);
                qdisc_bstats_cpu_update(qdisc, skb);
                qdisc_qstats_cpu_qlen_dec(qdisc);
        }

        return skb;
}
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff *skb = NULL;
        int band;

        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
                struct skb_array *q = band2list(priv, band);

                skb = __skb_array_peek(q);
        }

        return skb;
}
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int i, band;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (band = 0; band < PFIFO_FAST_BANDS; band++) {
                struct skb_array *q = band2list(priv, band);
                struct sk_buff *skb;

                /* NULL ring is possible if destroy path is due to a failed
                 * skb_array_init() in pfifo_fast_init() case.
                 */

                while ((skb = skb_array_consume_bh(q)) != NULL)
                        kfree_skb(skb);
        }

        for_each_possible_cpu(i) {
                struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int prio;

        /* guard against zero length rings */
        if (!qlen)
                return -EINVAL;

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                struct skb_array *q = band2list(priv, prio);
                int err;

                err = skb_array_init(q, qlen, GFP_KERNEL);
                if (err)
                        return -ENOMEM;
        }

        /* Can by-pass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}
static void pfifo_fast_destroy(struct Qdisc *sch)
{
        struct pfifo_fast_priv *priv = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                struct skb_array *q = band2list(priv, prio);

                /* NULL ring is possible if destroy path is due to a failed
                 * skb_array_init() in pfifo_fast_init() case.
                 */
                if (!q->ring.queue)
                        continue;
                /* Destroy ring but no need to kfree_skb because a call to
                 * pfifo_fast_reset() has already done that work.
                 */
                ptr_ring_cleanup(&q->ring, NULL);
        }
}
static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
                                          unsigned int new_len)
{
        struct pfifo_fast_priv *priv = qdisc_priv(sch);
        struct skb_array *bands[PFIFO_FAST_BANDS];
        int prio;

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                struct skb_array *q = band2list(priv, prio);

                bands[prio] = q;
        }

        return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
                                         GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .priv_size		=	sizeof(struct pfifo_fast_priv),
        .enqueue		=	pfifo_fast_enqueue,
        .dequeue		=	pfifo_fast_dequeue,
        .peek			=	pfifo_fast_peek,
        .init			=	pfifo_fast_init,
        .destroy		=	pfifo_fast_destroy,
        .reset			=	pfifo_fast_reset,
        .dump			=	pfifo_fast_dump,
        .change_tx_queue_len	=	pfifo_fast_change_tx_queue_len,
        .owner			=	THIS_MODULE,
        .static_flags		=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack)
{
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        struct net_device *dev;

        if (!dev_queue) {
                NL_SET_ERR_MSG(extack, "No device queue given");
        }

        dev = dev_queue->dev;
        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));

        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        /* if we got non aligned memory, ask more and do alignment ourself */
        if (sch != p) {
                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
                                 netdev_queue_numa_node_read(dev_queue));
                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
                sch->padded = (char *) sch - (char *) p;
        }
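        /* Editor's illustration (not in the original source): QDISC_ALIGN()
         * rounds the struct Qdisc pointer up to the next QDISC_ALIGNTO
         * boundary.  If, for example, the alignment were 64 bytes and
         * kzalloc_node() returned a pointer ending in ...0x08, sch would end
         * up at ...0x40 and sch->padded would record the 0x38-byte gap, so
         * that qdisc_free() can later recover the original allocation with
         * kfree((char *)qdisc - qdisc->padded).
         */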
        __skb_queue_head_init(&sch->gso_skb);
        __skb_queue_head_init(&sch->skb_bad_txq);
        qdisc_skb_head_init(&sch->q);
        spin_lock_init(&sch->q.lock);

        if (ops->static_flags & TCQ_F_CPUSTATS) {
                sch->cpu_bstats =
                        netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
                if (!sch->cpu_bstats)

                sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
                if (!sch->cpu_qstats) {
                        free_percpu(sch->cpu_bstats);

        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

        seqcount_init(&sch->running);
        lockdep_set_class(&sch->running,
                          dev->qdisc_running_key ?: &qdisc_running_key);

        sch->flags = ops->static_flags;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;

        refcount_set(&sch->refcnt, 1);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
                                unsigned int parentid,
                                struct netlink_ext_ack *extack)
{
        struct Qdisc *sch;

        if (!try_module_get(ops->owner)) {
                NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
                return NULL;
        }

        sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                module_put(ops->owner);
                return NULL;
        }
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL, extack) == 0)
                return sch;

        qdisc_destroy(sch);
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);
/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
                __skb_unlink(skb, &qdisc->gso_skb);
                kfree_skb_list(skb);
        }

        skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
                __skb_unlink(skb, &qdisc->skb_bad_txq);
                kfree_skb_list(skb);
        }

        qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);
void qdisc_free(struct Qdisc *qdisc)
{
        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
                free_percpu(qdisc->cpu_qstats);
        }

        kfree((char *) qdisc - qdisc->padded);
}
void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;
        struct sk_buff *skb, *tmp;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !refcount_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_hash_del(qdisc);

        qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
        gen_kill_estimator(&qdisc->rate_est);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
                __skb_unlink(skb, &qdisc->gso_skb);
                kfree_skb_list(skb);
        }

        skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
                __skb_unlink(skb, &qdisc->skb_bad_txq);
                kfree_skb_list(skb);
        }
}
EXPORT_SYMBOL(qdisc_destroy);
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* ... and graft new one */
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;
        const struct Qdisc_ops *ops = default_qdisc_ops;

        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;

        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
        if (!qdisc) {
                netdev_info(dev, "activation failed\n");
                return;
        }
        if (!netif_is_multiqueue(dev))
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        dev_queue->qdisc_sleeping = qdisc;
}
static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) ||
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                qdisc_refcount_inc(dev->qdisc);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
                if (qdisc) {
                        dev->qdisc = qdisc;
                        qdisc->ops->attach(qdisc);
                }
        }
#ifdef CONFIG_NET_SCHED
        if (dev->qdisc != &noop_qdisc)
                qdisc_hash_add(dev->qdisc, false);
#endif
}
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}
void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* No queueing discipline is attached to device;
         * create default one for devices, which need queueing
         * and noqueue_qdisc for virtual interfaces
         */

        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

        if (need_watchdog) {
                netif_trans_update(dev);
                dev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = rtnl_dereference(dev_queue->qdisc);
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}
static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;

                if (q->flags & TCQ_F_NOLOCK) {
                        val = test_bit(__QDISC_STATE_SCHED, &q->state);
                } else {
                        root_lock = qdisc_lock(q);
                        spin_lock_bh(root_lock);

                        val = (qdisc_is_running(q) ||
                               test_bit(__QDISC_STATE_SCHED, &q->state));

                        spin_unlock_bh(root_lock);
                }

                if (val)
                        return true;
        }
        return false;
}
static void dev_qdisc_reset(struct net_device *dev,
                            struct netdev_queue *dev_queue,
                            void *none)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

        if (qdisc)
                qdisc_reset(qdisc);
}
/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
        struct net_device *dev;

        list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);
        }

        /* Wait for outstanding qdisc-less dev_queue_xmit calls.
         * This is avoided if all devices are in dismantle phase :
         * Caller will call synchronize_net() for us
         */

        /* Wait for outstanding qdisc_run calls. */
        list_for_each_entry(dev, head, close_list) {
                while (some_qdisc_is_busy(dev))
                        yield();
                /* The new qdisc is assigned at this point so we can safely
                 * unwind stale skb lists and qdisc statistics
                 */
                netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
                if (dev_ingress_queue(dev))
                        dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
        }
}
void dev_deactivate(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);
static int qdisc_change_tx_queue_len(struct net_device *dev,
                                     struct netdev_queue *dev_queue)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->change_tx_queue_len)
                return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
        return 0;
}
int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
        bool up = dev->flags & IFF_UP;
        unsigned int i;
        int ret = 0;

        if (up)
                dev_deactivate(dev);

        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

                /* TODO: revert changes on a partial failure */
        }
static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        rcu_assign_pointer(dev_queue->qdisc, qdisc);
        dev_queue->qdisc_sleeping = qdisc;
        __skb_queue_head_init(&qdisc->gso_skb);
        __skb_queue_head_init(&qdisc->skb_bad_txq);
}
void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

        timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}
static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
        dev_queue->qdisc_sleeping = qdisc_default;

        qdisc_destroy(qdisc);
}
void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}
void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64)
{
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
        r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);

        /*
         * The deal here is to replace a divide by a reciprocal one
         * in fast path (a reciprocal divide is a multiply and a shift)
         *
         * Normal formula would be :
         *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
         *
         * We compute mult/shift to use instead :
         *  time_in_ns = (len * mult) >> shift;
         *
         * We try to get the highest possible mult value for accuracy,
         * but have to make sure no overflows will ever happen.
         */
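        /* Editor's worked example (not part of the original source): for a
         * rate of 125,000,000 bytes/s (1 Gbit/s), div64_u64(NSEC_PER_SEC,
         * rate_bytes_ps) gives mult = 8 with shift = 0, so a 1500 byte
         * packet costs 1500 * 8 = 12000 ns, i.e. the expected 12 us of wire
         * time.  The loop below keeps doubling factor (and incrementing
         * shift) to squeeze more fractional precision into mult for rates
         * where the plain integer division would round away too much.
         */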
        if (r->rate_bytes_ps > 0) {
                u64 factor = NSEC_PER_SEC;

                for (;;) {
                        r->mult = div64_u64(factor, r->rate_bytes_ps);
                        if (r->mult & (1U << 31) || factor & (1ULL << 63))
                                break;
                        factor <<= 1;
                        r->shift++;
                }
        }
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}
void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
                          struct tcf_proto *tp_head)
{
        struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
        struct mini_Qdisc *miniq;

        RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
        /* Wait for flying RCU callback before it is freed. */
        rcu_barrier_bh();

        miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
                &miniqp->miniq1 : &miniqp->miniq2;

        /* We need to make sure that readers won't see the miniq
         * we are about to modify. So wait until previous call_rcu_bh callback
         * is done.
         */
        rcu_barrier_bh();
        miniq->filter_list = tp_head;
        rcu_assign_pointer(*miniqp->p_miniq, miniq);

        /* This is counterpart of the rcu barriers above. We need to
         * block potential new user of miniq_old until all readers
         * are not seeing it.
         */
        call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq)
{
        miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
        miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
        miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
        miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
        miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);