/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <trace/events/qdisc.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
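/* Lockless qdiscs (TCQ_F_NOLOCK, e.g. pfifo_fast below) are the exception on
 * the transmit fast path: they skip the qdisc root lock and rely on the
 * qdisc's seqlock/busylock plus per-CPU stats instead; see qdisc_restart()
 * and qdisc_alloc() below.
 */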
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);
		q->qstats.requeues++;
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;	/* it's still part of the queue */

		skb = next;
	}
	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = qdisc_lock(q);

	spin_lock(lock);
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		qdisc_qstats_cpu_requeues_inc(q);
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);

		skb = next;
	}
	spin_unlock(lock);

	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (q->flags & TCQ_F_NOLOCK)
		return dev_requeue_skb_locked(skb, q);
	else
		return __dev_requeue_skb(skb, q);
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb->next = NULL;
}
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
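/* Whatever list is returned is handed to sch_direct_xmit() as one unit;
 * *packets tells the caller how many packets that list represents
 * (a bulked GSO skb still counts as one, see try_bulk_dequeue_skb()).
 */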
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning running seqcount bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen backoff
 *				true   - feel free to send more pkts
 */
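/* That return value is what qdisc_restart() below propagates to the
 * __qdisc_run() loop, which keeps sending until told to back off or until
 * its packet quota runs out.
 */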
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}
}
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}
/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};
static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
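/* Example: skb->priority TC_PRIO_BESTEFFORT (0) maps to band 1,
 * TC_PRIO_INTERACTIVE (6) to band 0 (dequeued first) and
 * TC_PRIO_BULK (2) to band 2 (dequeued last).
 */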
/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);

	qdisc_qstats_cpu_qlen_inc(qdisc);
	/* Note: skb cannot be used after skb_array_produce(),
	 * so we better not use qdisc_qstats_cpu_backlog_inc()
	 */
	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
		qdisc_bstats_cpu_update(qdisc, skb);
		qdisc_qstats_cpu_qlen_dec(qdisc);
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if the destroy path runs due to a
		 * failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	for_each_possible_cpu(i) {
		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);

		q->backlog = 0;
		q->qlen = 0;
	}
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if the destroy path runs due to a
		 * failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id			=	"pfifo_fast",
	.priv_size		=	sizeof(struct pfifo_fast_priv),
	.enqueue		=	pfifo_fast_enqueue,
	.dequeue		=	pfifo_fast_dequeue,
	.peek			=	pfifo_fast_peek,
	.init			=	pfifo_fast_init,
	.destroy		=	pfifo_fast_destroy,
	.reset			=	pfifo_fast_reset,
	.dump			=	pfifo_fast_dump,
	.change_tx_queue_len	=	pfifo_fast_change_tx_queue_len,
	.owner			=	THIS_MODULE,
	.static_flags		=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
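/* qdisc_alloc() below copies .static_flags into sch->flags and, because of
 * TCQ_F_CPUSTATS, allocates per-CPU bstats/qstats, so every pfifo_fast
 * instance runs lockless with per-CPU counters.
 */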
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask more and do alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0)
		return sch;

	qdisc_destroy(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc_free(qdisc);
}
EXPORT_SYMBOL(qdisc_destroy);
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create default one for devices which need queueing
	 * and noqueue_qdisc for virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		bool nolock = qdisc->flags & TCQ_F_NOLOCK;

		if (nolock)
			spin_lock_bh(&qdisc->seqlock);
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
		if (nolock)
			spin_unlock_bh(&qdisc->seqlock);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

static void dev_qdisc_reset(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *none)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (qdisc)
		qdisc_reset(qdisc);
}
/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase :
	 * Caller will call synchronize_net() for us
	 */
	synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev))
			yield();
		/* The new qdisc is assigned at this point so we can safely
		 * unwind stale skb lists and qdisc statistics
		 */
		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
		if (dev_ingress_queue(dev))
			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);
static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}
static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
	__skb_queue_head_init(&qdisc->gso_skb);
	__skb_queue_head_init(&qdisc->skb_bad_txq);
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be :
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead :
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
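	/* For example, psched_l2t_ns() in <net/sch_generic.h> consumes these
	 * as roughly: time_in_ns = ((u64)len * r->mult) >> r->shift.
	 */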
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for any in-flight RCU callback before it is freed. */
		rcu_barrier_bh();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until previous call_rcu_bh callback
	 * is done.
	 */
	rcu_barrier_bh();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is the counterpart of the rcu barriers above. We need
		 * to block potential new users of miniq_old until all readers
		 * have stopped seeing it.
		 */
		call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);